package v1

v0.32.0 (not the latest version of this module)
Published: Nov 29, 2023 License: Apache-2.0 Imports: 9 Imported by: 0

Documentation

Index

Constants

const (
	// If unspecified, Compute Engine default behavior will apply, which is the same as INHERIT_FROM_SUBNETWORK.
	GceClusterConfigPrivateIpv6GoogleAccessPrivateIpv6GoogleAccessUnspecified = GceClusterConfigPrivateIpv6GoogleAccess("PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED")
	// Private access to and from Google Services configuration inherited from the subnetwork configuration. This is the default Compute Engine behavior.
	GceClusterConfigPrivateIpv6GoogleAccessInheritFromSubnetwork = GceClusterConfigPrivateIpv6GoogleAccess("INHERIT_FROM_SUBNETWORK")
	// Enables outbound private IPv6 access to Google Services from the Dataproc cluster.
	GceClusterConfigPrivateIpv6GoogleAccessOutbound = GceClusterConfigPrivateIpv6GoogleAccess("OUTBOUND")
	// Enables bidirectional private IPv6 access between Google Services and the Dataproc cluster.
	GceClusterConfigPrivateIpv6GoogleAccessBidirectional = GceClusterConfigPrivateIpv6GoogleAccess("BIDIRECTIONAL")
)
const (
	// Role is unspecified.
	GkeNodePoolTargetRolesItemRoleUnspecified = GkeNodePoolTargetRolesItem("ROLE_UNSPECIFIED")
	// At least one node pool must have the DEFAULT role. Work assigned to a role that is not associated with a node pool is assigned to the node pool with the DEFAULT role. For example, work assigned to the CONTROLLER role will be assigned to the node pool with the DEFAULT role if no node pool has the CONTROLLER role.
	GkeNodePoolTargetRolesItemDefault = GkeNodePoolTargetRolesItem("DEFAULT")
	// Run work associated with the Dataproc control plane (for example, controllers and webhooks). Very low resource requirements.
	GkeNodePoolTargetRolesItemController = GkeNodePoolTargetRolesItem("CONTROLLER")
	// Run work associated with a Spark driver of a job.
	GkeNodePoolTargetRolesItemSparkDriver = GkeNodePoolTargetRolesItem("SPARK_DRIVER")
	// Run work associated with a Spark executor of a job.
	GkeNodePoolTargetRolesItemSparkExecutor = GkeNodePoolTargetRolesItem("SPARK_EXECUTOR")
)
const (
	// Preemptibility is unspecified; the system will choose the appropriate setting for each instance group.
	InstanceGroupConfigPreemptibilityPreemptibilityUnspecified = InstanceGroupConfigPreemptibility("PREEMPTIBILITY_UNSPECIFIED")
	// Instances are non-preemptible. This option is allowed for all instance groups and is the only valid value for Master and Worker instance groups.
	InstanceGroupConfigPreemptibilityNonPreemptible = InstanceGroupConfigPreemptibility("NON_PREEMPTIBLE")
	// Instances are preemptible (https://cloud.google.com/compute/docs/instances/preemptible). This option is allowed only for secondary worker (https://cloud.google.com/dataproc/docs/concepts/compute/secondary-vms) groups.
	InstanceGroupConfigPreemptibilityPreemptible = InstanceGroupConfigPreemptibility("PREEMPTIBLE")
	// Instances are Spot VMs (https://cloud.google.com/compute/docs/instances/spot). This option is allowed only for secondary worker (https://cloud.google.com/dataproc/docs/concepts/compute/secondary-vms) groups. Spot VMs are the latest version of preemptible VMs (https://cloud.google.com/compute/docs/instances/preemptible), and provide additional features.
	InstanceGroupConfigPreemptibilitySpot = InstanceGroupConfigPreemptibility("SPOT")
)
const (
	// The kernel is unknown.
	JupyterConfigKernelKernelUnspecified = JupyterConfigKernel("KERNEL_UNSPECIFIED")
	// Python kernel.
	JupyterConfigKernelPython = JupyterConfigKernel("PYTHON")
	// Scala kernel.
	JupyterConfigKernelScala = JupyterConfigKernel("SCALA")
)
const (
	// Required unspecified metric source.
	MetricMetricSourceMetricSourceUnspecified = MetricMetricSource("METRIC_SOURCE_UNSPECIFIED")
	// Monitoring agent metrics. If this source is enabled, Dataproc enables the monitoring agent in Compute Engine, and collects monitoring agent metrics, which are published with an agent.googleapis.com prefix.
	MetricMetricSourceMonitoringAgentDefaults = MetricMetricSource("MONITORING_AGENT_DEFAULTS")
	// HDFS metric source.
	MetricMetricSourceHdfs = MetricMetricSource("HDFS")
	// Spark metric source.
	MetricMetricSourceSpark = MetricMetricSource("SPARK")
	// YARN metric source.
	MetricMetricSourceYarn = MetricMetricSource("YARN")
	// Spark History Server metric source.
	MetricMetricSourceSparkHistoryServer = MetricMetricSource("SPARK_HISTORY_SERVER")
	// Hiveserver2 metric source.
	MetricMetricSourceHiveserver2 = MetricMetricSource("HIVESERVER2")
	// Hive Metastore metric source.
	MetricMetricSourceHivemetastore = MetricMetricSource("HIVEMETASTORE")
	// Flink metric source.
	MetricMetricSourceFlink = MetricMetricSource("FLINK")
)
const (
	// Required unspecified role.
	NodeGroupRolesItemRoleUnspecified = NodeGroupRolesItem("ROLE_UNSPECIFIED")
	// Job drivers run on the node pool.
	NodeGroupRolesItemDriver = NodeGroupRolesItem("DRIVER")
)
const (
	ReservationAffinityConsumeReservationTypeTypeUnspecified = ReservationAffinityConsumeReservationType("TYPE_UNSPECIFIED")
	// Do not consume from any allocated capacity.
	ReservationAffinityConsumeReservationTypeNoReservation = ReservationAffinityConsumeReservationType("NO_RESERVATION")
	// Consume any reservation available.
	ReservationAffinityConsumeReservationTypeAnyReservation = ReservationAffinityConsumeReservationType("ANY_RESERVATION")
	// Must consume from a specific reservation. Must specify key value fields for specifying the reservations.
	ReservationAffinityConsumeReservationTypeSpecificReservation = ReservationAffinityConsumeReservationType("SPECIFIC_RESERVATION")
)
const (
	// Unspecified component. Specifying this will cause Cluster creation to fail.
	SoftwareConfigOptionalComponentsItemComponentUnspecified = SoftwareConfigOptionalComponentsItem("COMPONENT_UNSPECIFIED")
	// The Anaconda python distribution. The Anaconda component is not supported in the Dataproc 2.0 image. The 2.0 image is pre-installed with Miniconda.
	SoftwareConfigOptionalComponentsItemAnaconda = SoftwareConfigOptionalComponentsItem("ANACONDA")
	// Docker
	SoftwareConfigOptionalComponentsItemDocker = SoftwareConfigOptionalComponentsItem("DOCKER")
	// The Druid query engine. (alpha)
	SoftwareConfigOptionalComponentsItemDruid = SoftwareConfigOptionalComponentsItem("DRUID")
	// Flink
	SoftwareConfigOptionalComponentsItemFlink = SoftwareConfigOptionalComponentsItem("FLINK")
	// HBase. (beta)
	SoftwareConfigOptionalComponentsItemHbase = SoftwareConfigOptionalComponentsItem("HBASE")
	// The Hive Web HCatalog (the REST service for accessing HCatalog).
	SoftwareConfigOptionalComponentsItemHiveWebhcat = SoftwareConfigOptionalComponentsItem("HIVE_WEBHCAT")
	// Hudi.
	SoftwareConfigOptionalComponentsItemHudi = SoftwareConfigOptionalComponentsItem("HUDI")
	// The Jupyter Notebook.
	SoftwareConfigOptionalComponentsItemJupyter = SoftwareConfigOptionalComponentsItem("JUPYTER")
	// The Presto query engine.
	SoftwareConfigOptionalComponentsItemPresto = SoftwareConfigOptionalComponentsItem("PRESTO")
	// The Trino query engine.
	SoftwareConfigOptionalComponentsItemTrino = SoftwareConfigOptionalComponentsItem("TRINO")
	// The Ranger service.
	SoftwareConfigOptionalComponentsItemRanger = SoftwareConfigOptionalComponentsItem("RANGER")
	// The Solr service.
	SoftwareConfigOptionalComponentsItemSolr = SoftwareConfigOptionalComponentsItem("SOLR")
	// The Zeppelin notebook.
	SoftwareConfigOptionalComponentsItemZeppelin = SoftwareConfigOptionalComponentsItem("ZEPPELIN")
	// The Zookeeper service.
	SoftwareConfigOptionalComponentsItemZookeeper = SoftwareConfigOptionalComponentsItem("ZOOKEEPER")
)

Variables

This section is empty.

Functions

This section is empty.

Types

type AcceleratorConfig

type AcceleratorConfig struct {
	// The number of the accelerator cards of this type exposed to this instance.
	AcceleratorCount *int `pulumi:"acceleratorCount"`
	// Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes). Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-k80, projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-k80, nvidia-tesla-k80. Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-k80.
	AcceleratorTypeUri *string `pulumi:"acceleratorTypeUri"`
}

Specifies the type and number of accelerator cards attached to the instances of an instance group. See GPUs on Compute Engine (https://cloud.google.com/compute/docs/gpus/).

type AcceleratorConfigArgs

type AcceleratorConfigArgs struct {
	// The number of the accelerator cards of this type exposed to this instance.
	AcceleratorCount pulumi.IntPtrInput `pulumi:"acceleratorCount"`
	// Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes). Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-k80, projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-k80, nvidia-tesla-k80. Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-k80.
	AcceleratorTypeUri pulumi.StringPtrInput `pulumi:"acceleratorTypeUri"`
}

Specifies the type and number of accelerator cards attached to the instances of an instance group. See GPUs on Compute Engine (https://cloud.google.com/compute/docs/gpus/).

func (AcceleratorConfigArgs) ElementType

func (AcceleratorConfigArgs) ElementType() reflect.Type

func (AcceleratorConfigArgs) ToAcceleratorConfigOutput

func (i AcceleratorConfigArgs) ToAcceleratorConfigOutput() AcceleratorConfigOutput

func (AcceleratorConfigArgs) ToAcceleratorConfigOutputWithContext

func (i AcceleratorConfigArgs) ToAcceleratorConfigOutputWithContext(ctx context.Context) AcceleratorConfigOutput

type AcceleratorConfigArray

type AcceleratorConfigArray []AcceleratorConfigInput

func (AcceleratorConfigArray) ElementType

func (AcceleratorConfigArray) ElementType() reflect.Type

func (AcceleratorConfigArray) ToAcceleratorConfigArrayOutput

func (i AcceleratorConfigArray) ToAcceleratorConfigArrayOutput() AcceleratorConfigArrayOutput

func (AcceleratorConfigArray) ToAcceleratorConfigArrayOutputWithContext

func (i AcceleratorConfigArray) ToAcceleratorConfigArrayOutputWithContext(ctx context.Context) AcceleratorConfigArrayOutput

type AcceleratorConfigArrayInput

type AcceleratorConfigArrayInput interface {
	pulumi.Input

	ToAcceleratorConfigArrayOutput() AcceleratorConfigArrayOutput
	ToAcceleratorConfigArrayOutputWithContext(context.Context) AcceleratorConfigArrayOutput
}

AcceleratorConfigArrayInput is an input type that accepts AcceleratorConfigArray and AcceleratorConfigArrayOutput values. You can construct a concrete instance of `AcceleratorConfigArrayInput` via:

AcceleratorConfigArray{ AcceleratorConfigArgs{...} }
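
As a hedged illustration only (the accelerator type, count, and the dataproc/pulumi package aliases are assumptions, not part of this listing), a literal array input might be built like this inside a Pulumi program:

accelerators := dataproc.AcceleratorConfigArray{
	dataproc.AcceleratorConfigArgs{
		// Placeholder values; per the field documentation above, use the
		// short accelerator type name when Auto Zone Placement is in effect.
		AcceleratorCount:   pulumi.IntPtr(2),
		AcceleratorTypeUri: pulumi.StringPtr("nvidia-tesla-k80"),
	},
}
_ = accelerators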

type AcceleratorConfigArrayOutput

type AcceleratorConfigArrayOutput struct{ *pulumi.OutputState }

func (AcceleratorConfigArrayOutput) ElementType

func (AcceleratorConfigArrayOutput) Index

func (AcceleratorConfigArrayOutput) ToAcceleratorConfigArrayOutput

func (o AcceleratorConfigArrayOutput) ToAcceleratorConfigArrayOutput() AcceleratorConfigArrayOutput

func (AcceleratorConfigArrayOutput) ToAcceleratorConfigArrayOutputWithContext

func (o AcceleratorConfigArrayOutput) ToAcceleratorConfigArrayOutputWithContext(ctx context.Context) AcceleratorConfigArrayOutput

type AcceleratorConfigInput

type AcceleratorConfigInput interface {
	pulumi.Input

	ToAcceleratorConfigOutput() AcceleratorConfigOutput
	ToAcceleratorConfigOutputWithContext(context.Context) AcceleratorConfigOutput
}

AcceleratorConfigInput is an input type that accepts AcceleratorConfigArgs and AcceleratorConfigOutput values. You can construct a concrete instance of `AcceleratorConfigInput` via:

AcceleratorConfigArgs{...}

type AcceleratorConfigOutput

type AcceleratorConfigOutput struct{ *pulumi.OutputState }

Specifies the type and number of accelerator cards attached to the instances of an instance group. See GPUs on Compute Engine (https://cloud.google.com/compute/docs/gpus/).

func (AcceleratorConfigOutput) AcceleratorCount

func (o AcceleratorConfigOutput) AcceleratorCount() pulumi.IntPtrOutput

The number of the accelerator cards of this type exposed to this instance.

func (AcceleratorConfigOutput) AcceleratorTypeUri

func (o AcceleratorConfigOutput) AcceleratorTypeUri() pulumi.StringPtrOutput

Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes). Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-k80, projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-k80, nvidia-tesla-k80. Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-k80.

func (AcceleratorConfigOutput) ElementType

func (AcceleratorConfigOutput) ElementType() reflect.Type

func (AcceleratorConfigOutput) ToAcceleratorConfigOutput

func (o AcceleratorConfigOutput) ToAcceleratorConfigOutput() AcceleratorConfigOutput

func (AcceleratorConfigOutput) ToAcceleratorConfigOutputWithContext

func (o AcceleratorConfigOutput) ToAcceleratorConfigOutputWithContext(ctx context.Context) AcceleratorConfigOutput

type AcceleratorConfigResponse

type AcceleratorConfigResponse struct {
	// The number of the accelerator cards of this type exposed to this instance.
	AcceleratorCount int `pulumi:"acceleratorCount"`
	// Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes). Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-k80, projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-k80, nvidia-tesla-k80. Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-k80.
	AcceleratorTypeUri string `pulumi:"acceleratorTypeUri"`
}

Specifies the type and number of accelerator cards attached to the instances of an instance group. See GPUs on Compute Engine (https://cloud.google.com/compute/docs/gpus/).

type AcceleratorConfigResponseArrayOutput

type AcceleratorConfigResponseArrayOutput struct{ *pulumi.OutputState }

func (AcceleratorConfigResponseArrayOutput) ElementType

func (AcceleratorConfigResponseArrayOutput) Index

func (AcceleratorConfigResponseArrayOutput) ToAcceleratorConfigResponseArrayOutput

func (o AcceleratorConfigResponseArrayOutput) ToAcceleratorConfigResponseArrayOutput() AcceleratorConfigResponseArrayOutput

func (AcceleratorConfigResponseArrayOutput) ToAcceleratorConfigResponseArrayOutputWithContext

func (o AcceleratorConfigResponseArrayOutput) ToAcceleratorConfigResponseArrayOutputWithContext(ctx context.Context) AcceleratorConfigResponseArrayOutput

type AcceleratorConfigResponseOutput

type AcceleratorConfigResponseOutput struct{ *pulumi.OutputState }

Specifies the type and number of accelerator cards attached to the instances of an instance group. See GPUs on Compute Engine (https://cloud.google.com/compute/docs/gpus/).

func (AcceleratorConfigResponseOutput) AcceleratorCount

func (o AcceleratorConfigResponseOutput) AcceleratorCount() pulumi.IntOutput

The number of the accelerator cards of this type exposed to this instance.

func (AcceleratorConfigResponseOutput) AcceleratorTypeUri

func (o AcceleratorConfigResponseOutput) AcceleratorTypeUri() pulumi.StringOutput

Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes). Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-k80, projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-k80, nvidia-tesla-k80. Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-k80.

func (AcceleratorConfigResponseOutput) ElementType

func (AcceleratorConfigResponseOutput) ToAcceleratorConfigResponseOutput

func (o AcceleratorConfigResponseOutput) ToAcceleratorConfigResponseOutput() AcceleratorConfigResponseOutput

func (AcceleratorConfigResponseOutput) ToAcceleratorConfigResponseOutputWithContext

func (o AcceleratorConfigResponseOutput) ToAcceleratorConfigResponseOutputWithContext(ctx context.Context) AcceleratorConfigResponseOutput

type AutoscalingConfig

type AutoscalingConfig struct {
	// Optional. The autoscaling policy used by the cluster. Only resource names that include the project id and location (region) are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id], projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]. Note that the policy must be in the same project and Dataproc region.
	PolicyUri *string `pulumi:"policyUri"`
}

Autoscaling Policy config associated with the cluster.

type AutoscalingConfigArgs

type AutoscalingConfigArgs struct {
	// Optional. The autoscaling policy used by the cluster. Only resource names that include the project id and location (region) are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id], projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]. Note that the policy must be in the same project and Dataproc region.
	PolicyUri pulumi.StringPtrInput `pulumi:"policyUri"`
}

Autoscaling Policy config associated with the cluster.

func (AutoscalingConfigArgs) ElementType

func (AutoscalingConfigArgs) ElementType() reflect.Type

func (AutoscalingConfigArgs) ToAutoscalingConfigOutput

func (i AutoscalingConfigArgs) ToAutoscalingConfigOutput() AutoscalingConfigOutput

func (AutoscalingConfigArgs) ToAutoscalingConfigOutputWithContext

func (i AutoscalingConfigArgs) ToAutoscalingConfigOutputWithContext(ctx context.Context) AutoscalingConfigOutput

func (AutoscalingConfigArgs) ToAutoscalingConfigPtrOutput

func (i AutoscalingConfigArgs) ToAutoscalingConfigPtrOutput() AutoscalingConfigPtrOutput

func (AutoscalingConfigArgs) ToAutoscalingConfigPtrOutputWithContext

func (i AutoscalingConfigArgs) ToAutoscalingConfigPtrOutputWithContext(ctx context.Context) AutoscalingConfigPtrOutput

type AutoscalingConfigInput

type AutoscalingConfigInput interface {
	pulumi.Input

	ToAutoscalingConfigOutput() AutoscalingConfigOutput
	ToAutoscalingConfigOutputWithContext(context.Context) AutoscalingConfigOutput
}

AutoscalingConfigInput is an input type that accepts AutoscalingConfigArgs and AutoscalingConfigOutput values. You can construct a concrete instance of `AutoscalingConfigInput` via:

AutoscalingConfigArgs{...}

type AutoscalingConfigOutput

type AutoscalingConfigOutput struct{ *pulumi.OutputState }

Autoscaling Policy config associated with the cluster.

func (AutoscalingConfigOutput) ElementType

func (AutoscalingConfigOutput) ElementType() reflect.Type

func (AutoscalingConfigOutput) PolicyUri

Optional. The autoscaling policy used by the cluster. Only resource names that include the project id and location (region) are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id], projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]. Note that the policy must be in the same project and Dataproc region.

func (AutoscalingConfigOutput) ToAutoscalingConfigOutput

func (o AutoscalingConfigOutput) ToAutoscalingConfigOutput() AutoscalingConfigOutput

func (AutoscalingConfigOutput) ToAutoscalingConfigOutputWithContext

func (o AutoscalingConfigOutput) ToAutoscalingConfigOutputWithContext(ctx context.Context) AutoscalingConfigOutput

func (AutoscalingConfigOutput) ToAutoscalingConfigPtrOutput

func (o AutoscalingConfigOutput) ToAutoscalingConfigPtrOutput() AutoscalingConfigPtrOutput

func (AutoscalingConfigOutput) ToAutoscalingConfigPtrOutputWithContext

func (o AutoscalingConfigOutput) ToAutoscalingConfigPtrOutputWithContext(ctx context.Context) AutoscalingConfigPtrOutput

type AutoscalingConfigPtrInput

type AutoscalingConfigPtrInput interface {
	pulumi.Input

	ToAutoscalingConfigPtrOutput() AutoscalingConfigPtrOutput
	ToAutoscalingConfigPtrOutputWithContext(context.Context) AutoscalingConfigPtrOutput
}

AutoscalingConfigPtrInput is an input type that accepts AutoscalingConfigArgs, AutoscalingConfigPtr and AutoscalingConfigPtrOutput values. You can construct a concrete instance of `AutoscalingConfigPtrInput` via:

        AutoscalingConfigArgs{...}

or:

        nil
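
As a hedged sketch (placeholder policy URI; pulumi refers to the standard Pulumi SDK package), either form can be supplied where an AutoscalingConfigPtrInput is expected:

var autoscaling dataproc.AutoscalingConfigPtrInput

// Attach a policy by resource name (placeholder value)...
autoscaling = dataproc.AutoscalingConfigArgs{
	PolicyUri: pulumi.StringPtr("projects/my-project/locations/us-central1/autoscalingPolicies/my-policy"),
}

// ...or pass nil to leave autoscaling unconfigured.
autoscaling = nil
_ = autoscaling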

type AutoscalingConfigPtrOutput

type AutoscalingConfigPtrOutput struct{ *pulumi.OutputState }

func (AutoscalingConfigPtrOutput) Elem

func (AutoscalingConfigPtrOutput) ElementType

func (AutoscalingConfigPtrOutput) ElementType() reflect.Type

func (AutoscalingConfigPtrOutput) PolicyUri

Optional. The autoscaling policy used by the cluster. Only resource names that include the project id and location (region) are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id], projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]. Note that the policy must be in the same project and Dataproc region.

func (AutoscalingConfigPtrOutput) ToAutoscalingConfigPtrOutput

func (o AutoscalingConfigPtrOutput) ToAutoscalingConfigPtrOutput() AutoscalingConfigPtrOutput

func (AutoscalingConfigPtrOutput) ToAutoscalingConfigPtrOutputWithContext

func (o AutoscalingConfigPtrOutput) ToAutoscalingConfigPtrOutputWithContext(ctx context.Context) AutoscalingConfigPtrOutput

type AutoscalingConfigResponse

type AutoscalingConfigResponse struct {
	// Optional. The autoscaling policy used by the cluster. Only resource names that include the project id and location (region) are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id], projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]. Note that the policy must be in the same project and Dataproc region.
	PolicyUri string `pulumi:"policyUri"`
}

Autoscaling Policy config associated with the cluster.

type AutoscalingConfigResponseOutput

type AutoscalingConfigResponseOutput struct{ *pulumi.OutputState }

Autoscaling Policy config associated with the cluster.

func (AutoscalingConfigResponseOutput) ElementType

func (AutoscalingConfigResponseOutput) PolicyUri

Optional. The autoscaling policy used by the cluster. Only resource names that include the project id and location (region) are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id], projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]. Note that the policy must be in the same project and Dataproc region.

func (AutoscalingConfigResponseOutput) ToAutoscalingConfigResponseOutput

func (o AutoscalingConfigResponseOutput) ToAutoscalingConfigResponseOutput() AutoscalingConfigResponseOutput

func (AutoscalingConfigResponseOutput) ToAutoscalingConfigResponseOutputWithContext

func (o AutoscalingConfigResponseOutput) ToAutoscalingConfigResponseOutputWithContext(ctx context.Context) AutoscalingConfigResponseOutput

type AutoscalingPolicy

type AutoscalingPolicy struct {
	pulumi.CustomResourceState

	BasicAlgorithm BasicAutoscalingAlgorithmResponseOutput `pulumi:"basicAlgorithm"`
	// Optional. The labels to associate with this autoscaling policy. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with an autoscaling policy.
	Labels   pulumi.StringMapOutput `pulumi:"labels"`
	Location pulumi.StringOutput    `pulumi:"location"`
	// The "resource name" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For projects.locations.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}
	Name    pulumi.StringOutput `pulumi:"name"`
	Project pulumi.StringOutput `pulumi:"project"`
	// Optional. Describes how the autoscaler will operate for secondary workers.
	SecondaryWorkerConfig InstanceGroupAutoscalingPolicyConfigResponseOutput `pulumi:"secondaryWorkerConfig"`
	// Describes how the autoscaler will operate for primary workers.
	WorkerConfig InstanceGroupAutoscalingPolicyConfigResponseOutput `pulumi:"workerConfig"`
}

Creates a new autoscaling policy. Auto-naming is currently not supported for this resource.

func GetAutoscalingPolicy

func GetAutoscalingPolicy(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *AutoscalingPolicyState, opts ...pulumi.ResourceOption) (*AutoscalingPolicy, error)

GetAutoscalingPolicy gets an existing AutoscalingPolicy resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).
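
A hedged sketch of adopting an existing policy, assumed to run inside a pulumi.Run callback; the resource ID below is a placeholder:

// ctx is the *pulumi.Context supplied by pulumi.Run.
existing, err := dataproc.GetAutoscalingPolicy(ctx, "existing-policy",
	pulumi.ID("projects/my-project/regions/us-central1/autoscalingPolicies/my-policy"), nil)
if err != nil {
	return err
}
ctx.Export("existingPolicyName", existing.Name)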

func NewAutoscalingPolicy

func NewAutoscalingPolicy(ctx *pulumi.Context,
	name string, args *AutoscalingPolicyArgs, opts ...pulumi.ResourceOption) (*AutoscalingPolicy, error)

NewAutoscalingPolicy registers a new resource with the given unique name, arguments, and options.
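
A minimal end-to-end sketch of registering a policy. The nested BasicAutoscalingAlgorithmArgs, BasicYarnAutoscalingConfigArgs, and InstanceGroupAutoscalingPolicyConfigArgs field names are not part of this listing; they are assumed to follow the underlying Dataproc API and the usual Pulumi Args naming, and the import path is assumed to be that of the google-native SDK:

package main

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// The policy id must be 3-50 characters of letters, digits,
		// underscores, and hyphens (see AutoscalingPolicyArgs below).
		_, err := dataproc.NewAutoscalingPolicy(ctx, "example-policy", &dataproc.AutoscalingPolicyArgs{
			Id:       pulumi.String("example-policy"),
			Location: pulumi.String("us-central1"), // placeholder region
			BasicAlgorithm: &dataproc.BasicAutoscalingAlgorithmArgs{
				YarnConfig: &dataproc.BasicYarnAutoscalingConfigArgs{
					GracefulDecommissionTimeout: pulumi.String("300s"),
					ScaleUpFactor:               pulumi.Float64(0.5),
					ScaleDownFactor:             pulumi.Float64(0.5),
				},
			},
			// WorkerConfig is the only required nested input in AutoscalingPolicyArgs.
			WorkerConfig: &dataproc.InstanceGroupAutoscalingPolicyConfigArgs{
				MaxInstances: pulumi.Int(10),
			},
		})
		return err
	})
}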

func (*AutoscalingPolicy) ElementType

func (*AutoscalingPolicy) ElementType() reflect.Type

func (*AutoscalingPolicy) ToAutoscalingPolicyOutput

func (i *AutoscalingPolicy) ToAutoscalingPolicyOutput() AutoscalingPolicyOutput

func (*AutoscalingPolicy) ToAutoscalingPolicyOutputWithContext

func (i *AutoscalingPolicy) ToAutoscalingPolicyOutputWithContext(ctx context.Context) AutoscalingPolicyOutput

type AutoscalingPolicyArgs

type AutoscalingPolicyArgs struct {
	BasicAlgorithm BasicAutoscalingAlgorithmPtrInput
	// The policy id. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters.
	Id pulumi.StringInput
	// Optional. The labels to associate with this autoscaling policy. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with an autoscaling policy.
	Labels   pulumi.StringMapInput
	Location pulumi.StringPtrInput
	Project  pulumi.StringPtrInput
	// Optional. Describes how the autoscaler will operate for secondary workers.
	SecondaryWorkerConfig InstanceGroupAutoscalingPolicyConfigPtrInput
	// Describes how the autoscaler will operate for primary workers.
	WorkerConfig InstanceGroupAutoscalingPolicyConfigInput
}

The set of arguments for constructing an AutoscalingPolicy resource.

func (AutoscalingPolicyArgs) ElementType

func (AutoscalingPolicyArgs) ElementType() reflect.Type

type AutoscalingPolicyIamBinding added in v0.26.0

type AutoscalingPolicyIamBinding struct {
	pulumi.CustomResourceState

	// An IAM Condition for a given binding. See https://cloud.google.com/iam/docs/conditions-overview for additional details.
	Condition iam.ConditionPtrOutput `pulumi:"condition"`
	// The etag of the resource's IAM policy.
	Etag pulumi.StringOutput `pulumi:"etag"`
	// Specifies the principals requesting access for a Google Cloud resource. members can have the following values:
	//
	//  * allUsers: A special identifier that represents anyone who is on the internet; with or without a Google account.
	//  * allAuthenticatedUsers: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation.
	//  * user:{emailid}: An email address that represents a specific Google account. For example, alice@example.com.
	//  * serviceAccount:{emailid}: An email address that represents a Google service account. For example, my-other-app@appspot.gserviceaccount.com.
	//  * serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]: An identifier for a Kubernetes service account (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, my-project.svc.id.goog[my-namespace/my-kubernetes-sa].
	//  * group:{emailid}: An email address that represents a Google group. For example, admins@example.com.
	//  * domain:{domain}: The G Suite domain (primary) that represents all the users of that domain. For example, google.com or example.com.
	//  * deleted:user:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a user that has been recently deleted. For example, alice@example.com?uid=123456789012345678901. If the user is recovered, this value reverts to user:{emailid} and the recovered user retains the role in the binding.
	//  * deleted:serviceAccount:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901. If the service account is undeleted, this value reverts to serviceAccount:{emailid} and the undeleted service account retains the role in the binding.
	//  * deleted:group:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, admins@example.com?uid=123456789012345678901. If the group is recovered, this value reverts to group:{emailid} and the recovered group retains the role in the binding.
	Members pulumi.StringArrayOutput `pulumi:"members"`
	// The name of the resource to manage IAM policies for.
	Name pulumi.StringOutput `pulumi:"name"`
	// The project in which the resource belongs. If it is not provided, a default will be supplied.
	Project pulumi.StringOutput `pulumi:"project"`
	// Role that is assigned to the list of members, or principals. For example, roles/viewer, roles/editor, or roles/owner.
	Role pulumi.StringOutput `pulumi:"role"`
}

Sets the access control policy on the specified resource. Replaces any existing policy. Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.

func GetAutoscalingPolicyIamBinding added in v0.26.0

func GetAutoscalingPolicyIamBinding(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *AutoscalingPolicyIamBindingState, opts ...pulumi.ResourceOption) (*AutoscalingPolicyIamBinding, error)

GetAutoscalingPolicyIamBinding gets an existing AutoscalingPolicyIamBinding resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewAutoscalingPolicyIamBinding added in v0.26.0

func NewAutoscalingPolicyIamBinding(ctx *pulumi.Context,
	name string, args *AutoscalingPolicyIamBindingArgs, opts ...pulumi.ResourceOption) (*AutoscalingPolicyIamBinding, error)

NewAutoscalingPolicyIamBinding registers a new resource with the given unique name, arguments, and options.
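
A hedged sketch, assumed to run inside a Pulumi program body, that grants a single role on an existing policy (all identifiers are placeholders):

_, err := dataproc.NewAutoscalingPolicyIamBinding(ctx, "policy-viewers", &dataproc.AutoscalingPolicyIamBindingArgs{
	// Full resource name of the autoscaling policy (placeholder).
	Name: pulumi.String("projects/my-project/regions/us-central1/autoscalingPolicies/my-policy"),
	Role: pulumi.String("roles/viewer"),
	Members: pulumi.StringArray{
		pulumi.String("user:alice@example.com"),
		pulumi.String("group:admins@example.com"),
	},
})
if err != nil {
	return err
}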

func (*AutoscalingPolicyIamBinding) ElementType added in v0.26.0

func (*AutoscalingPolicyIamBinding) ElementType() reflect.Type

func (*AutoscalingPolicyIamBinding) ToAutoscalingPolicyIamBindingOutput added in v0.26.0

func (i *AutoscalingPolicyIamBinding) ToAutoscalingPolicyIamBindingOutput() AutoscalingPolicyIamBindingOutput

func (*AutoscalingPolicyIamBinding) ToAutoscalingPolicyIamBindingOutputWithContext added in v0.26.0

func (i *AutoscalingPolicyIamBinding) ToAutoscalingPolicyIamBindingOutputWithContext(ctx context.Context) AutoscalingPolicyIamBindingOutput

type AutoscalingPolicyIamBindingArgs added in v0.26.0

type AutoscalingPolicyIamBindingArgs struct {
	// An IAM Condition for a given binding.
	Condition iam.ConditionPtrInput
	// Identities that will be granted the privilege in role. Each entry can have one of the following values:
	//
	//  * user:{emailid}: An email address that represents a specific Google account. For example, alice@gmail.com or joe@example.com.
	//  * serviceAccount:{emailid}: An email address that represents a service account. For example, my-other-app@appspot.gserviceaccount.com.
	//  * group:{emailid}: An email address that represents a Google group. For example, admins@example.com.
	//  * domain:{domain}: A G Suite domain (primary, instead of alias) name that represents all the users of that domain. For example, google.com or example.com.
	Members pulumi.StringArrayInput
	// The name of the resource to manage IAM policies for.
	Name pulumi.StringInput
	// The role that should be applied. Only one `IamBinding` can be used per role.
	Role pulumi.StringInput
}

The set of arguments for constructing an AutoscalingPolicyIamBinding resource.

func (AutoscalingPolicyIamBindingArgs) ElementType added in v0.26.0

type AutoscalingPolicyIamBindingInput added in v0.26.0

type AutoscalingPolicyIamBindingInput interface {
	pulumi.Input

	ToAutoscalingPolicyIamBindingOutput() AutoscalingPolicyIamBindingOutput
	ToAutoscalingPolicyIamBindingOutputWithContext(ctx context.Context) AutoscalingPolicyIamBindingOutput
}

type AutoscalingPolicyIamBindingOutput added in v0.26.0

type AutoscalingPolicyIamBindingOutput struct{ *pulumi.OutputState }

func (AutoscalingPolicyIamBindingOutput) Condition added in v0.26.0

An IAM Condition for a given binding. See https://cloud.google.com/iam/docs/conditions-overview for additional details.

func (AutoscalingPolicyIamBindingOutput) ElementType added in v0.26.0

func (AutoscalingPolicyIamBindingOutput) Etag added in v0.26.0

The etag of the resource's IAM policy.

func (AutoscalingPolicyIamBindingOutput) Members added in v0.26.0

Specifies the principals requesting access for a Google Cloud resource. members can have the following values:

* allUsers: A special identifier that represents anyone who is on the internet; with or without a Google account.
* allAuthenticatedUsers: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation.
* user:{emailid}: An email address that represents a specific Google account. For example, alice@example.com.
* serviceAccount:{emailid}: An email address that represents a Google service account. For example, my-other-app@appspot.gserviceaccount.com.
* serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]: An identifier for a Kubernetes service account (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, my-project.svc.id.goog[my-namespace/my-kubernetes-sa].
* group:{emailid}: An email address that represents a Google group. For example, admins@example.com.
* domain:{domain}: The G Suite domain (primary) that represents all the users of that domain. For example, google.com or example.com.
* deleted:user:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a user that has been recently deleted. For example, alice@example.com?uid=123456789012345678901. If the user is recovered, this value reverts to user:{emailid} and the recovered user retains the role in the binding.
* deleted:serviceAccount:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901. If the service account is undeleted, this value reverts to serviceAccount:{emailid} and the undeleted service account retains the role in the binding.
* deleted:group:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, admins@example.com?uid=123456789012345678901. If the group is recovered, this value reverts to group:{emailid} and the recovered group retains the role in the binding.

func (AutoscalingPolicyIamBindingOutput) Name added in v0.26.0

The name of the resource to manage IAM policies for.

func (AutoscalingPolicyIamBindingOutput) Project added in v0.26.0

The project in which the resource belongs. If it is not provided, a default will be supplied.

func (AutoscalingPolicyIamBindingOutput) Role added in v0.26.0

Role that is assigned to the list of members, or principals. For example, roles/viewer, roles/editor, or roles/owner.

func (AutoscalingPolicyIamBindingOutput) ToAutoscalingPolicyIamBindingOutput added in v0.26.0

func (o AutoscalingPolicyIamBindingOutput) ToAutoscalingPolicyIamBindingOutput() AutoscalingPolicyIamBindingOutput

func (AutoscalingPolicyIamBindingOutput) ToAutoscalingPolicyIamBindingOutputWithContext added in v0.26.0

func (o AutoscalingPolicyIamBindingOutput) ToAutoscalingPolicyIamBindingOutputWithContext(ctx context.Context) AutoscalingPolicyIamBindingOutput

type AutoscalingPolicyIamBindingState added in v0.26.0

type AutoscalingPolicyIamBindingState struct {
}

func (AutoscalingPolicyIamBindingState) ElementType added in v0.26.0

type AutoscalingPolicyIamMember added in v0.26.0

type AutoscalingPolicyIamMember struct {
	pulumi.CustomResourceState

	// An IAM Condition for a given binding. See https://cloud.google.com/iam/docs/conditions-overview for additional details.
	Condition iam.ConditionPtrOutput `pulumi:"condition"`
	// The etag of the resource's IAM policy.
	Etag pulumi.StringOutput `pulumi:"etag"`
	// Specifies the principals requesting access for a Google Cloud resource. members can have the following values:
	//
	//  * allUsers: A special identifier that represents anyone who is on the internet; with or without a Google account.
	//  * allAuthenticatedUsers: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation.
	//  * user:{emailid}: An email address that represents a specific Google account. For example, alice@example.com.
	//  * serviceAccount:{emailid}: An email address that represents a Google service account. For example, my-other-app@appspot.gserviceaccount.com.
	//  * serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]: An identifier for a Kubernetes service account (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, my-project.svc.id.goog[my-namespace/my-kubernetes-sa].
	//  * group:{emailid}: An email address that represents a Google group. For example, admins@example.com.
	//  * domain:{domain}: The G Suite domain (primary) that represents all the users of that domain. For example, google.com or example.com.
	//  * deleted:user:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a user that has been recently deleted. For example, alice@example.com?uid=123456789012345678901. If the user is recovered, this value reverts to user:{emailid} and the recovered user retains the role in the binding.
	//  * deleted:serviceAccount:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901. If the service account is undeleted, this value reverts to serviceAccount:{emailid} and the undeleted service account retains the role in the binding.
	//  * deleted:group:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, admins@example.com?uid=123456789012345678901. If the group is recovered, this value reverts to group:{emailid} and the recovered group retains the role in the binding.
	Member pulumi.StringOutput `pulumi:"member"`
	// The name of the resource to manage IAM policies for.
	Name pulumi.StringOutput `pulumi:"name"`
	// The project in which the resource belongs. If it is not provided, a default will be supplied.
	Project pulumi.StringOutput `pulumi:"project"`
	// Role that is assigned to the list of members, or principals. For example, roles/viewer, roles/editor, or roles/owner.
	Role pulumi.StringOutput `pulumi:"role"`
}

Sets the access control policy on the specified resource. Replaces any existing policy. Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.

func GetAutoscalingPolicyIamMember added in v0.26.0

func GetAutoscalingPolicyIamMember(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *AutoscalingPolicyIamMemberState, opts ...pulumi.ResourceOption) (*AutoscalingPolicyIamMember, error)

GetAutoscalingPolicyIamMember gets an existing AutoscalingPolicyIamMember resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewAutoscalingPolicyIamMember added in v0.26.0

func NewAutoscalingPolicyIamMember(ctx *pulumi.Context,
	name string, args *AutoscalingPolicyIamMemberArgs, opts ...pulumi.ResourceOption) (*AutoscalingPolicyIamMember, error)

NewAutoscalingPolicyIamMember registers a new resource with the given unique name, arguments, and options.

func (*AutoscalingPolicyIamMember) ElementType added in v0.26.0

func (*AutoscalingPolicyIamMember) ElementType() reflect.Type

func (*AutoscalingPolicyIamMember) ToAutoscalingPolicyIamMemberOutput added in v0.26.0

func (i *AutoscalingPolicyIamMember) ToAutoscalingPolicyIamMemberOutput() AutoscalingPolicyIamMemberOutput

func (*AutoscalingPolicyIamMember) ToAutoscalingPolicyIamMemberOutputWithContext added in v0.26.0

func (i *AutoscalingPolicyIamMember) ToAutoscalingPolicyIamMemberOutputWithContext(ctx context.Context) AutoscalingPolicyIamMemberOutput

type AutoscalingPolicyIamMemberArgs added in v0.26.0

type AutoscalingPolicyIamMemberArgs struct {
	// An IAM Condition for a given binding.
	Condition iam.ConditionPtrInput
	// Identity that will be granted the privilege in role. The entry can have one of the following values:
	//
	//  * user:{emailid}: An email address that represents a specific Google account. For example, alice@gmail.com or joe@example.com.
	//  * serviceAccount:{emailid}: An email address that represents a service account. For example, my-other-app@appspot.gserviceaccount.com.
	//  * group:{emailid}: An email address that represents a Google group. For example, admins@example.com.
	//  * domain:{domain}: A G Suite domain (primary, instead of alias) name that represents all the users of that domain. For example, google.com or example.com.
	Member pulumi.StringInput
	// The name of the resource to manage IAM policies for.
	Name pulumi.StringInput
	// The role that should be applied.
	Role pulumi.StringInput
}

The set of arguments for constructing an AutoscalingPolicyIamMember resource.

func (AutoscalingPolicyIamMemberArgs) ElementType added in v0.26.0

type AutoscalingPolicyIamMemberInput added in v0.26.0

type AutoscalingPolicyIamMemberInput interface {
	pulumi.Input

	ToAutoscalingPolicyIamMemberOutput() AutoscalingPolicyIamMemberOutput
	ToAutoscalingPolicyIamMemberOutputWithContext(ctx context.Context) AutoscalingPolicyIamMemberOutput
}

type AutoscalingPolicyIamMemberOutput added in v0.26.0

type AutoscalingPolicyIamMemberOutput struct{ *pulumi.OutputState }

func (AutoscalingPolicyIamMemberOutput) Condition added in v0.26.0

An IAM Condition for a given binding. See https://cloud.google.com/iam/docs/conditions-overview for additional details.

func (AutoscalingPolicyIamMemberOutput) ElementType added in v0.26.0

func (AutoscalingPolicyIamMemberOutput) Etag added in v0.26.0

The etag of the resource's IAM policy.

func (AutoscalingPolicyIamMemberOutput) Member added in v0.26.0

Specifies the principals requesting access for a Google Cloud resource. members can have the following values:

* allUsers: A special identifier that represents anyone who is on the internet; with or without a Google account.
* allAuthenticatedUsers: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation.
* user:{emailid}: An email address that represents a specific Google account. For example, alice@example.com.
* serviceAccount:{emailid}: An email address that represents a Google service account. For example, my-other-app@appspot.gserviceaccount.com.
* serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]: An identifier for a Kubernetes service account (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, my-project.svc.id.goog[my-namespace/my-kubernetes-sa].
* group:{emailid}: An email address that represents a Google group. For example, admins@example.com.
* domain:{domain}: The G Suite domain (primary) that represents all the users of that domain. For example, google.com or example.com.
* deleted:user:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a user that has been recently deleted. For example, alice@example.com?uid=123456789012345678901. If the user is recovered, this value reverts to user:{emailid} and the recovered user retains the role in the binding.
* deleted:serviceAccount:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901. If the service account is undeleted, this value reverts to serviceAccount:{emailid} and the undeleted service account retains the role in the binding.
* deleted:group:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, admins@example.com?uid=123456789012345678901. If the group is recovered, this value reverts to group:{emailid} and the recovered group retains the role in the binding.

func (AutoscalingPolicyIamMemberOutput) Name added in v0.26.0

The name of the resource to manage IAM policies for.

func (AutoscalingPolicyIamMemberOutput) Project added in v0.26.0

The project in which the resource belongs. If it is not provided, a default will be supplied.

func (AutoscalingPolicyIamMemberOutput) Role added in v0.26.0

Role that is assigned to the list of members, or principals. For example, roles/viewer, roles/editor, or roles/owner.

func (AutoscalingPolicyIamMemberOutput) ToAutoscalingPolicyIamMemberOutput added in v0.26.0

func (o AutoscalingPolicyIamMemberOutput) ToAutoscalingPolicyIamMemberOutput() AutoscalingPolicyIamMemberOutput

func (AutoscalingPolicyIamMemberOutput) ToAutoscalingPolicyIamMemberOutputWithContext added in v0.26.0

func (o AutoscalingPolicyIamMemberOutput) ToAutoscalingPolicyIamMemberOutputWithContext(ctx context.Context) AutoscalingPolicyIamMemberOutput

type AutoscalingPolicyIamMemberState added in v0.26.0

type AutoscalingPolicyIamMemberState struct {
}

func (AutoscalingPolicyIamMemberState) ElementType added in v0.26.0

type AutoscalingPolicyIamPolicy

type AutoscalingPolicyIamPolicy struct {
	pulumi.CustomResourceState

	AutoscalingPolicyId pulumi.StringOutput `pulumi:"autoscalingPolicyId"`
	// Associates a list of members, or principals, with a role. Optionally, may specify a condition that determines how and when the bindings are applied. Each of the bindings must contain at least one principal. The bindings in a Policy can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the bindings grant 50 different roles to user:alice@example.com, and not to any other principal, then you can add another 1,450 principals to the bindings in the Policy.
	Bindings BindingResponseArrayOutput `pulumi:"bindings"`
	// etag is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the etag in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An etag is returned in the response to getIamPolicy, and systems are expected to put that etag in the request to setIamPolicy to ensure that their change will be applied to the same version of the policy. Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.
	Etag     pulumi.StringOutput `pulumi:"etag"`
	Location pulumi.StringOutput `pulumi:"location"`
	Project  pulumi.StringOutput `pulumi:"project"`
	// Specifies the format of the policy. Valid values are 0, 1, and 3. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version 3. This requirement applies to the following operations: getting a policy that includes a conditional role binding; adding a conditional role binding to a policy; changing a conditional role binding in a policy; removing any role binding, with or without a condition, from a policy that includes conditions. Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).
	Version pulumi.IntOutput `pulumi:"version"`
}

Sets the access control policy on the specified resource. Replaces any existing policy. Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors. Note - this resource's API doesn't support deletion. When deleted, the resource will persist on Google Cloud even though it will be deleted from Pulumi state.

func GetAutoscalingPolicyIamPolicy

func GetAutoscalingPolicyIamPolicy(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *AutoscalingPolicyIamPolicyState, opts ...pulumi.ResourceOption) (*AutoscalingPolicyIamPolicy, error)

GetAutoscalingPolicyIamPolicy gets an existing AutoscalingPolicyIamPolicy resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewAutoscalingPolicyIamPolicy

func NewAutoscalingPolicyIamPolicy(ctx *pulumi.Context,
	name string, args *AutoscalingPolicyIamPolicyArgs, opts ...pulumi.ResourceOption) (*AutoscalingPolicyIamPolicy, error)

NewAutoscalingPolicyIamPolicy registers a new resource with the given unique name, arguments, and options.
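
A hedged sketch of replacing the resource's IAM policy wholesale, assumed to run inside a Pulumi program body; BindingArray and BindingArgs are the generated binding input types (not shown in this listing), and all identifiers are placeholders:

_, err := dataproc.NewAutoscalingPolicyIamPolicy(ctx, "policy-iam", &dataproc.AutoscalingPolicyIamPolicyArgs{
	AutoscalingPolicyId: pulumi.String("my-policy"),
	Location:            pulumi.String("us-central1"),
	Bindings: dataproc.BindingArray{
		dataproc.BindingArgs{
			Role:    pulumi.String("roles/viewer"),
			Members: pulumi.StringArray{pulumi.String("group:admins@example.com")},
		},
	},
})
if err != nil {
	return err
}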

func (*AutoscalingPolicyIamPolicy) ElementType

func (*AutoscalingPolicyIamPolicy) ElementType() reflect.Type

func (*AutoscalingPolicyIamPolicy) ToAutoscalingPolicyIamPolicyOutput

func (i *AutoscalingPolicyIamPolicy) ToAutoscalingPolicyIamPolicyOutput() AutoscalingPolicyIamPolicyOutput

func (*AutoscalingPolicyIamPolicy) ToAutoscalingPolicyIamPolicyOutputWithContext

func (i *AutoscalingPolicyIamPolicy) ToAutoscalingPolicyIamPolicyOutputWithContext(ctx context.Context) AutoscalingPolicyIamPolicyOutput

type AutoscalingPolicyIamPolicyArgs

type AutoscalingPolicyIamPolicyArgs struct {
	AutoscalingPolicyId pulumi.StringInput
	// Associates a list of members, or principals, with a role. Optionally, may specify a condition that determines how and when the bindings are applied. Each of the bindings must contain at least one principal. The bindings in a Policy can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the bindings grant 50 different roles to user:alice@example.com, and not to any other principal, then you can add another 1,450 principals to the bindings in the Policy.
	Bindings BindingArrayInput
	// etag is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the etag in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An etag is returned in the response to getIamPolicy, and systems are expected to put that etag in the request to setIamPolicy to ensure that their change will be applied to the same version of the policy. Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.
	Etag     pulumi.StringPtrInput
	Location pulumi.StringPtrInput
	Project  pulumi.StringPtrInput
	// Specifies the format of the policy. Valid values are 0, 1, and 3. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version 3. This requirement applies to the following operations: getting a policy that includes a conditional role binding; adding a conditional role binding to a policy; changing a conditional role binding in a policy; removing any role binding, with or without a condition, from a policy that includes conditions. Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).
	Version pulumi.IntPtrInput
}

The set of arguments for constructing an AutoscalingPolicyIamPolicy resource.

func (AutoscalingPolicyIamPolicyArgs) ElementType

type AutoscalingPolicyIamPolicyInput

type AutoscalingPolicyIamPolicyInput interface {
	pulumi.Input

	ToAutoscalingPolicyIamPolicyOutput() AutoscalingPolicyIamPolicyOutput
	ToAutoscalingPolicyIamPolicyOutputWithContext(ctx context.Context) AutoscalingPolicyIamPolicyOutput
}

type AutoscalingPolicyIamPolicyOutput

type AutoscalingPolicyIamPolicyOutput struct{ *pulumi.OutputState }

func (AutoscalingPolicyIamPolicyOutput) AutoscalingPolicyId added in v0.21.0

func (o AutoscalingPolicyIamPolicyOutput) AutoscalingPolicyId() pulumi.StringOutput

func (AutoscalingPolicyIamPolicyOutput) Bindings added in v0.19.0

Associates a list of members, or principals, with a role. Optionally, may specify a condition that determines how and when the bindings are applied. Each of the bindings must contain at least one principal. The bindings in a Policy can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the bindings grant 50 different roles to user:alice@example.com, and not to any other principal, then you can add another 1,450 principals to the bindings in the Policy.

func (AutoscalingPolicyIamPolicyOutput) ElementType

func (AutoscalingPolicyIamPolicyOutput) Etag added in v0.19.0

etag is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the etag in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An etag is returned in the response to getIamPolicy, and systems are expected to put that etag in the request to setIamPolicy to ensure that their change will be applied to the same version of the policy.Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.

func (AutoscalingPolicyIamPolicyOutput) Location added in v0.21.0

func (AutoscalingPolicyIamPolicyOutput) Project added in v0.21.0

func (AutoscalingPolicyIamPolicyOutput) ToAutoscalingPolicyIamPolicyOutput

func (o AutoscalingPolicyIamPolicyOutput) ToAutoscalingPolicyIamPolicyOutput() AutoscalingPolicyIamPolicyOutput

func (AutoscalingPolicyIamPolicyOutput) ToAutoscalingPolicyIamPolicyOutputWithContext

func (o AutoscalingPolicyIamPolicyOutput) ToAutoscalingPolicyIamPolicyOutputWithContext(ctx context.Context) AutoscalingPolicyIamPolicyOutput

func (AutoscalingPolicyIamPolicyOutput) Version added in v0.19.0

Specifies the format of the policy. Valid values are 0, 1, and 3. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version 3. This requirement applies to the following operations: getting a policy that includes a conditional role binding; adding a conditional role binding to a policy; changing a conditional role binding in a policy; removing any role binding, with or without a condition, from a policy that includes conditions. Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).

type AutoscalingPolicyIamPolicyState

type AutoscalingPolicyIamPolicyState struct {
}

func (AutoscalingPolicyIamPolicyState) ElementType

type AutoscalingPolicyInput

type AutoscalingPolicyInput interface {
	pulumi.Input

	ToAutoscalingPolicyOutput() AutoscalingPolicyOutput
	ToAutoscalingPolicyOutputWithContext(ctx context.Context) AutoscalingPolicyOutput
}

type AutoscalingPolicyOutput

type AutoscalingPolicyOutput struct{ *pulumi.OutputState }

func (AutoscalingPolicyOutput) BasicAlgorithm added in v0.19.0

func (AutoscalingPolicyOutput) ElementType

func (AutoscalingPolicyOutput) ElementType() reflect.Type

func (AutoscalingPolicyOutput) Labels added in v0.19.0

Optional. The labels to associate with this autoscaling policy. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with an autoscaling policy.

func (AutoscalingPolicyOutput) Location added in v0.21.0

func (AutoscalingPolicyOutput) Name added in v0.19.0

The "resource name" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For projects.locations.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}

func (AutoscalingPolicyOutput) Project added in v0.21.0

func (AutoscalingPolicyOutput) SecondaryWorkerConfig added in v0.19.0

Optional. Describes how the autoscaler will operate for secondary workers.

func (AutoscalingPolicyOutput) ToAutoscalingPolicyOutput

func (o AutoscalingPolicyOutput) ToAutoscalingPolicyOutput() AutoscalingPolicyOutput

func (AutoscalingPolicyOutput) ToAutoscalingPolicyOutputWithContext

func (o AutoscalingPolicyOutput) ToAutoscalingPolicyOutputWithContext(ctx context.Context) AutoscalingPolicyOutput

func (AutoscalingPolicyOutput) WorkerConfig added in v0.19.0

Describes how the autoscaler will operate for primary workers.

type AutoscalingPolicyState

type AutoscalingPolicyState struct {
}

func (AutoscalingPolicyState) ElementType

func (AutoscalingPolicyState) ElementType() reflect.Type

type AuxiliaryNodeGroup added in v0.28.0

type AuxiliaryNodeGroup struct {
	// Node group configuration.
	NodeGroup NodeGroupType `pulumi:"nodeGroup"`
	// Optional. A node group ID. Generated if not specified. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of 3 to 33 characters.
	NodeGroupId *string `pulumi:"nodeGroupId"`
}

Node group identification and configuration information.

type AuxiliaryNodeGroupArgs added in v0.28.0

type AuxiliaryNodeGroupArgs struct {
	// Node group configuration.
	NodeGroup NodeGroupTypeInput `pulumi:"nodeGroup"`
	// Optional. A node group ID. Generated if not specified. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of 3 to 33 characters.
	NodeGroupId pulumi.StringPtrInput `pulumi:"nodeGroupId"`
}

Node group identification and configuration information.

func (AuxiliaryNodeGroupArgs) ElementType added in v0.28.0

func (AuxiliaryNodeGroupArgs) ElementType() reflect.Type

func (AuxiliaryNodeGroupArgs) ToAuxiliaryNodeGroupOutput added in v0.28.0

func (i AuxiliaryNodeGroupArgs) ToAuxiliaryNodeGroupOutput() AuxiliaryNodeGroupOutput

func (AuxiliaryNodeGroupArgs) ToAuxiliaryNodeGroupOutputWithContext added in v0.28.0

func (i AuxiliaryNodeGroupArgs) ToAuxiliaryNodeGroupOutputWithContext(ctx context.Context) AuxiliaryNodeGroupOutput

type AuxiliaryNodeGroupArray added in v0.28.0

type AuxiliaryNodeGroupArray []AuxiliaryNodeGroupInput

func (AuxiliaryNodeGroupArray) ElementType added in v0.28.0

func (AuxiliaryNodeGroupArray) ElementType() reflect.Type

func (AuxiliaryNodeGroupArray) ToAuxiliaryNodeGroupArrayOutput added in v0.28.0

func (i AuxiliaryNodeGroupArray) ToAuxiliaryNodeGroupArrayOutput() AuxiliaryNodeGroupArrayOutput

func (AuxiliaryNodeGroupArray) ToAuxiliaryNodeGroupArrayOutputWithContext added in v0.28.0

func (i AuxiliaryNodeGroupArray) ToAuxiliaryNodeGroupArrayOutputWithContext(ctx context.Context) AuxiliaryNodeGroupArrayOutput

type AuxiliaryNodeGroupArrayInput added in v0.28.0

type AuxiliaryNodeGroupArrayInput interface {
	pulumi.Input

	ToAuxiliaryNodeGroupArrayOutput() AuxiliaryNodeGroupArrayOutput
	ToAuxiliaryNodeGroupArrayOutputWithContext(context.Context) AuxiliaryNodeGroupArrayOutput
}

AuxiliaryNodeGroupArrayInput is an input type that accepts AuxiliaryNodeGroupArray and AuxiliaryNodeGroupArrayOutput values. You can construct a concrete instance of `AuxiliaryNodeGroupArrayInput` via:

AuxiliaryNodeGroupArray{ AuxiliaryNodeGroupArgs{...} }
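
A short sketch of this pattern, reusing the dataproc and pulumi import aliases from the example under AutoscalingPolicyIamPolicyArgs; the helper name is hypothetical, nodeGroup stands in for a fully populated NodeGroupTypeInput value, and the node group ID is a placeholder.

// auxNodeGroups wraps per-group args in AuxiliaryNodeGroupArray so the
// result satisfies AuxiliaryNodeGroupArrayInput.
func auxNodeGroups(nodeGroup dataproc.NodeGroupTypeInput) dataproc.AuxiliaryNodeGroupArrayInput {
	return dataproc.AuxiliaryNodeGroupArray{
		dataproc.AuxiliaryNodeGroupArgs{
			NodeGroup: nodeGroup,
			// NodeGroupId is optional; a value is generated if omitted.
			NodeGroupId: pulumi.String("aux-group-1"),
		},
	}
}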

type AuxiliaryNodeGroupArrayOutput added in v0.28.0

type AuxiliaryNodeGroupArrayOutput struct{ *pulumi.OutputState }

func (AuxiliaryNodeGroupArrayOutput) ElementType added in v0.28.0

func (AuxiliaryNodeGroupArrayOutput) Index added in v0.28.0

func (AuxiliaryNodeGroupArrayOutput) ToAuxiliaryNodeGroupArrayOutput added in v0.28.0

func (o AuxiliaryNodeGroupArrayOutput) ToAuxiliaryNodeGroupArrayOutput() AuxiliaryNodeGroupArrayOutput

func (AuxiliaryNodeGroupArrayOutput) ToAuxiliaryNodeGroupArrayOutputWithContext added in v0.28.0

func (o AuxiliaryNodeGroupArrayOutput) ToAuxiliaryNodeGroupArrayOutputWithContext(ctx context.Context) AuxiliaryNodeGroupArrayOutput

type AuxiliaryNodeGroupInput added in v0.28.0

type AuxiliaryNodeGroupInput interface {
	pulumi.Input

	ToAuxiliaryNodeGroupOutput() AuxiliaryNodeGroupOutput
	ToAuxiliaryNodeGroupOutputWithContext(context.Context) AuxiliaryNodeGroupOutput
}

AuxiliaryNodeGroupInput is an input type that accepts AuxiliaryNodeGroupArgs and AuxiliaryNodeGroupOutput values. You can construct a concrete instance of `AuxiliaryNodeGroupInput` via:

AuxiliaryNodeGroupArgs{...}

type AuxiliaryNodeGroupOutput added in v0.28.0

type AuxiliaryNodeGroupOutput struct{ *pulumi.OutputState }

Node group identification and configuration information.

func (AuxiliaryNodeGroupOutput) ElementType added in v0.28.0

func (AuxiliaryNodeGroupOutput) ElementType() reflect.Type

func (AuxiliaryNodeGroupOutput) NodeGroup added in v0.28.0

Node group configuration.

func (AuxiliaryNodeGroupOutput) NodeGroupId added in v0.28.0

Optional. A node group ID. Generated if not specified. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of 3 to 33 characters.

func (AuxiliaryNodeGroupOutput) ToAuxiliaryNodeGroupOutput added in v0.28.0

func (o AuxiliaryNodeGroupOutput) ToAuxiliaryNodeGroupOutput() AuxiliaryNodeGroupOutput

func (AuxiliaryNodeGroupOutput) ToAuxiliaryNodeGroupOutputWithContext added in v0.28.0

func (o AuxiliaryNodeGroupOutput) ToAuxiliaryNodeGroupOutputWithContext(ctx context.Context) AuxiliaryNodeGroupOutput

type AuxiliaryNodeGroupResponse added in v0.28.0

type AuxiliaryNodeGroupResponse struct {
	// Node group configuration.
	NodeGroup NodeGroupResponse `pulumi:"nodeGroup"`
	// Optional. A node group ID. Generated if not specified. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of 3 to 33 characters.
	NodeGroupId string `pulumi:"nodeGroupId"`
}

Node group identification and configuration information.

type AuxiliaryNodeGroupResponseArrayOutput added in v0.28.0

type AuxiliaryNodeGroupResponseArrayOutput struct{ *pulumi.OutputState }

func (AuxiliaryNodeGroupResponseArrayOutput) ElementType added in v0.28.0

func (AuxiliaryNodeGroupResponseArrayOutput) Index added in v0.28.0

func (AuxiliaryNodeGroupResponseArrayOutput) ToAuxiliaryNodeGroupResponseArrayOutput added in v0.28.0

func (o AuxiliaryNodeGroupResponseArrayOutput) ToAuxiliaryNodeGroupResponseArrayOutput() AuxiliaryNodeGroupResponseArrayOutput

func (AuxiliaryNodeGroupResponseArrayOutput) ToAuxiliaryNodeGroupResponseArrayOutputWithContext added in v0.28.0

func (o AuxiliaryNodeGroupResponseArrayOutput) ToAuxiliaryNodeGroupResponseArrayOutputWithContext(ctx context.Context) AuxiliaryNodeGroupResponseArrayOutput

type AuxiliaryNodeGroupResponseOutput added in v0.28.0

type AuxiliaryNodeGroupResponseOutput struct{ *pulumi.OutputState }

Node group identification and configuration information.

func (AuxiliaryNodeGroupResponseOutput) ElementType added in v0.28.0

func (AuxiliaryNodeGroupResponseOutput) NodeGroup added in v0.28.0

Node group configuration.

func (AuxiliaryNodeGroupResponseOutput) NodeGroupId added in v0.28.0

Optional. A node group ID. Generated if not specified. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of 3 to 33 characters.

func (AuxiliaryNodeGroupResponseOutput) ToAuxiliaryNodeGroupResponseOutput added in v0.28.0

func (o AuxiliaryNodeGroupResponseOutput) ToAuxiliaryNodeGroupResponseOutput() AuxiliaryNodeGroupResponseOutput

func (AuxiliaryNodeGroupResponseOutput) ToAuxiliaryNodeGroupResponseOutputWithContext added in v0.28.0

func (o AuxiliaryNodeGroupResponseOutput) ToAuxiliaryNodeGroupResponseOutputWithContext(ctx context.Context) AuxiliaryNodeGroupResponseOutput

type AuxiliaryServicesConfig added in v0.18.2

type AuxiliaryServicesConfig struct {
	// Optional. The Hive Metastore configuration for this workload.
	MetastoreConfig *MetastoreConfig `pulumi:"metastoreConfig"`
	// Optional. The Spark History Server configuration for the workload.
	SparkHistoryServerConfig *SparkHistoryServerConfig `pulumi:"sparkHistoryServerConfig"`
}

Auxiliary services configuration for a Cluster.

type AuxiliaryServicesConfigArgs added in v0.18.2

type AuxiliaryServicesConfigArgs struct {
	// Optional. The Hive Metastore configuration for this workload.
	MetastoreConfig MetastoreConfigPtrInput `pulumi:"metastoreConfig"`
	// Optional. The Spark History Server configuration for the workload.
	SparkHistoryServerConfig SparkHistoryServerConfigPtrInput `pulumi:"sparkHistoryServerConfig"`
}

Auxiliary services configuration for a Cluster.

func (AuxiliaryServicesConfigArgs) ElementType added in v0.18.2

func (AuxiliaryServicesConfigArgs) ToAuxiliaryServicesConfigOutput added in v0.18.2

func (i AuxiliaryServicesConfigArgs) ToAuxiliaryServicesConfigOutput() AuxiliaryServicesConfigOutput

func (AuxiliaryServicesConfigArgs) ToAuxiliaryServicesConfigOutputWithContext added in v0.18.2

func (i AuxiliaryServicesConfigArgs) ToAuxiliaryServicesConfigOutputWithContext(ctx context.Context) AuxiliaryServicesConfigOutput

func (AuxiliaryServicesConfigArgs) ToAuxiliaryServicesConfigPtrOutput added in v0.18.2

func (i AuxiliaryServicesConfigArgs) ToAuxiliaryServicesConfigPtrOutput() AuxiliaryServicesConfigPtrOutput

func (AuxiliaryServicesConfigArgs) ToAuxiliaryServicesConfigPtrOutputWithContext added in v0.18.2

func (i AuxiliaryServicesConfigArgs) ToAuxiliaryServicesConfigPtrOutputWithContext(ctx context.Context) AuxiliaryServicesConfigPtrOutput

type AuxiliaryServicesConfigInput added in v0.18.2

type AuxiliaryServicesConfigInput interface {
	pulumi.Input

	ToAuxiliaryServicesConfigOutput() AuxiliaryServicesConfigOutput
	ToAuxiliaryServicesConfigOutputWithContext(context.Context) AuxiliaryServicesConfigOutput
}

AuxiliaryServicesConfigInput is an input type that accepts AuxiliaryServicesConfigArgs and AuxiliaryServicesConfigOutput values. You can construct a concrete instance of `AuxiliaryServicesConfigInput` via:

AuxiliaryServicesConfigArgs{...}

type AuxiliaryServicesConfigOutput added in v0.18.2

type AuxiliaryServicesConfigOutput struct{ *pulumi.OutputState }

Auxiliary services configuration for a Cluster.

func (AuxiliaryServicesConfigOutput) ElementType added in v0.18.2

func (AuxiliaryServicesConfigOutput) MetastoreConfig added in v0.18.2

Optional. The Hive Metastore configuration for this workload.

func (AuxiliaryServicesConfigOutput) SparkHistoryServerConfig added in v0.18.2

Optional. The Spark History Server configuration for the workload.

func (AuxiliaryServicesConfigOutput) ToAuxiliaryServicesConfigOutput added in v0.18.2

func (o AuxiliaryServicesConfigOutput) ToAuxiliaryServicesConfigOutput() AuxiliaryServicesConfigOutput

func (AuxiliaryServicesConfigOutput) ToAuxiliaryServicesConfigOutputWithContext added in v0.18.2

func (o AuxiliaryServicesConfigOutput) ToAuxiliaryServicesConfigOutputWithContext(ctx context.Context) AuxiliaryServicesConfigOutput

func (AuxiliaryServicesConfigOutput) ToAuxiliaryServicesConfigPtrOutput added in v0.18.2

func (o AuxiliaryServicesConfigOutput) ToAuxiliaryServicesConfigPtrOutput() AuxiliaryServicesConfigPtrOutput

func (AuxiliaryServicesConfigOutput) ToAuxiliaryServicesConfigPtrOutputWithContext added in v0.18.2

func (o AuxiliaryServicesConfigOutput) ToAuxiliaryServicesConfigPtrOutputWithContext(ctx context.Context) AuxiliaryServicesConfigPtrOutput

type AuxiliaryServicesConfigPtrInput added in v0.18.2

type AuxiliaryServicesConfigPtrInput interface {
	pulumi.Input

	ToAuxiliaryServicesConfigPtrOutput() AuxiliaryServicesConfigPtrOutput
	ToAuxiliaryServicesConfigPtrOutputWithContext(context.Context) AuxiliaryServicesConfigPtrOutput
}

AuxiliaryServicesConfigPtrInput is an input type that accepts AuxiliaryServicesConfigArgs, AuxiliaryServicesConfigPtr and AuxiliaryServicesConfigPtrOutput values. You can construct a concrete instance of `AuxiliaryServicesConfigPtrInput` via:

        AuxiliaryServicesConfigArgs{...}

or:

        nil

func AuxiliaryServicesConfigPtr added in v0.18.2

func AuxiliaryServicesConfigPtr(v *AuxiliaryServicesConfigArgs) AuxiliaryServicesConfigPtrInput
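
A brief sketch of producing a pointer-style input with this helper, reusing the import aliases from the earlier example. The DataprocMetastoreService field inside MetastoreConfigArgs mirrors the Dataproc API's metastore config and is an assumption; the service resource name is a placeholder.

// auxServices builds an AuxiliaryServicesConfigPtrInput; pass nil instead
// when no auxiliary services are needed.
func auxServices() dataproc.AuxiliaryServicesConfigPtrInput {
	return dataproc.AuxiliaryServicesConfigPtr(&dataproc.AuxiliaryServicesConfigArgs{
		MetastoreConfig: dataproc.MetastoreConfigArgs{
			// Assumed field: resource name of an existing Dataproc Metastore service.
			DataprocMetastoreService: pulumi.String("projects/my-project/locations/us-central1/services/my-metastore"),
		},
	})
}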

type AuxiliaryServicesConfigPtrOutput added in v0.18.2

type AuxiliaryServicesConfigPtrOutput struct{ *pulumi.OutputState }

func (AuxiliaryServicesConfigPtrOutput) Elem added in v0.18.2

func (AuxiliaryServicesConfigPtrOutput) ElementType added in v0.18.2

func (AuxiliaryServicesConfigPtrOutput) MetastoreConfig added in v0.18.2

Optional. The Hive Metastore configuration for this workload.

func (AuxiliaryServicesConfigPtrOutput) SparkHistoryServerConfig added in v0.18.2

Optional. The Spark History Server configuration for the workload.

func (AuxiliaryServicesConfigPtrOutput) ToAuxiliaryServicesConfigPtrOutput added in v0.18.2

func (o AuxiliaryServicesConfigPtrOutput) ToAuxiliaryServicesConfigPtrOutput() AuxiliaryServicesConfigPtrOutput

func (AuxiliaryServicesConfigPtrOutput) ToAuxiliaryServicesConfigPtrOutputWithContext added in v0.18.2

func (o AuxiliaryServicesConfigPtrOutput) ToAuxiliaryServicesConfigPtrOutputWithContext(ctx context.Context) AuxiliaryServicesConfigPtrOutput

type AuxiliaryServicesConfigResponse added in v0.18.2

type AuxiliaryServicesConfigResponse struct {
	// Optional. The Hive Metastore configuration for this workload.
	MetastoreConfig MetastoreConfigResponse `pulumi:"metastoreConfig"`
	// Optional. The Spark History Server configuration for the workload.
	SparkHistoryServerConfig SparkHistoryServerConfigResponse `pulumi:"sparkHistoryServerConfig"`
}

Auxiliary services configuration for a Cluster.

type AuxiliaryServicesConfigResponseOutput added in v0.18.2

type AuxiliaryServicesConfigResponseOutput struct{ *pulumi.OutputState }

Auxiliary services configuration for a Cluster.

func (AuxiliaryServicesConfigResponseOutput) ElementType added in v0.18.2

func (AuxiliaryServicesConfigResponseOutput) MetastoreConfig added in v0.18.2

Optional. The Hive Metastore configuration for this workload.

func (AuxiliaryServicesConfigResponseOutput) SparkHistoryServerConfig added in v0.18.2

Optional. The Spark History Server configuration for the workload.

func (AuxiliaryServicesConfigResponseOutput) ToAuxiliaryServicesConfigResponseOutput added in v0.18.2

func (o AuxiliaryServicesConfigResponseOutput) ToAuxiliaryServicesConfigResponseOutput() AuxiliaryServicesConfigResponseOutput

func (AuxiliaryServicesConfigResponseOutput) ToAuxiliaryServicesConfigResponseOutputWithContext added in v0.18.2

func (o AuxiliaryServicesConfigResponseOutput) ToAuxiliaryServicesConfigResponseOutputWithContext(ctx context.Context) AuxiliaryServicesConfigResponseOutput

type BasicAutoscalingAlgorithm

type BasicAutoscalingAlgorithm struct {
	// Optional. Duration between scaling events. A scaling period starts after the update operation from the previous event has completed.Bounds: 2m, 1d. Default: 2m.
	CooldownPeriod *string `pulumi:"cooldownPeriod"`
	// Optional. Spark Standalone autoscaling configuration
	SparkStandaloneConfig *SparkStandaloneAutoscalingConfig `pulumi:"sparkStandaloneConfig"`
	// Optional. YARN autoscaling configuration.
	YarnConfig *BasicYarnAutoscalingConfig `pulumi:"yarnConfig"`
}

Basic algorithm for autoscaling.

type BasicAutoscalingAlgorithmArgs

type BasicAutoscalingAlgorithmArgs struct {
	// Optional. Duration between scaling events. A scaling period starts after the update operation from the previous event has completed.Bounds: 2m, 1d. Default: 2m.
	CooldownPeriod pulumi.StringPtrInput `pulumi:"cooldownPeriod"`
	// Optional. Spark Standalone autoscaling configuration
	SparkStandaloneConfig SparkStandaloneAutoscalingConfigPtrInput `pulumi:"sparkStandaloneConfig"`
	// Optional. YARN autoscaling configuration.
	YarnConfig BasicYarnAutoscalingConfigPtrInput `pulumi:"yarnConfig"`
}

Basic algorithm for autoscaling.
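
For illustration, a minimal sketch of these args with a nested YARN config, reusing the import aliases from the earlier example. Field names come from the structs above; the duration strings (seconds form assumed) and the scaling factors are placeholder values. Such a value could then be supplied as an autoscaling policy's BasicAlgorithm argument, assuming that args field matches the BasicAlgorithm output documented above.

// basicAlgorithm returns a basic autoscaling algorithm configuration.
func basicAlgorithm() dataproc.BasicAutoscalingAlgorithmPtrInput {
	return dataproc.BasicAutoscalingAlgorithmArgs{
		// Wait two minutes between scaling events (the documented default),
		// expressed here in the API's seconds form (an assumption).
		CooldownPeriod: pulumi.String("120s"),
		YarnConfig: dataproc.BasicYarnAutoscalingConfigArgs{
			// Give running jobs up to one hour before workers are removed.
			GracefulDecommissionTimeout: pulumi.String("3600s"),
			// Scale up to cover half of pending memory, scale down aggressively.
			ScaleUpFactor:   pulumi.Float64(0.5),
			ScaleDownFactor: pulumi.Float64(1.0),
		},
	}
}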

func (BasicAutoscalingAlgorithmArgs) ElementType

func (BasicAutoscalingAlgorithmArgs) ToBasicAutoscalingAlgorithmOutput

func (i BasicAutoscalingAlgorithmArgs) ToBasicAutoscalingAlgorithmOutput() BasicAutoscalingAlgorithmOutput

func (BasicAutoscalingAlgorithmArgs) ToBasicAutoscalingAlgorithmOutputWithContext

func (i BasicAutoscalingAlgorithmArgs) ToBasicAutoscalingAlgorithmOutputWithContext(ctx context.Context) BasicAutoscalingAlgorithmOutput

func (BasicAutoscalingAlgorithmArgs) ToBasicAutoscalingAlgorithmPtrOutput

func (i BasicAutoscalingAlgorithmArgs) ToBasicAutoscalingAlgorithmPtrOutput() BasicAutoscalingAlgorithmPtrOutput

func (BasicAutoscalingAlgorithmArgs) ToBasicAutoscalingAlgorithmPtrOutputWithContext

func (i BasicAutoscalingAlgorithmArgs) ToBasicAutoscalingAlgorithmPtrOutputWithContext(ctx context.Context) BasicAutoscalingAlgorithmPtrOutput

type BasicAutoscalingAlgorithmInput

type BasicAutoscalingAlgorithmInput interface {
	pulumi.Input

	ToBasicAutoscalingAlgorithmOutput() BasicAutoscalingAlgorithmOutput
	ToBasicAutoscalingAlgorithmOutputWithContext(context.Context) BasicAutoscalingAlgorithmOutput
}

BasicAutoscalingAlgorithmInput is an input type that accepts BasicAutoscalingAlgorithmArgs and BasicAutoscalingAlgorithmOutput values. You can construct a concrete instance of `BasicAutoscalingAlgorithmInput` via:

BasicAutoscalingAlgorithmArgs{...}

type BasicAutoscalingAlgorithmOutput

type BasicAutoscalingAlgorithmOutput struct{ *pulumi.OutputState }

Basic algorithm for autoscaling.

func (BasicAutoscalingAlgorithmOutput) CooldownPeriod

Optional. Duration between scaling events. A scaling period starts after the update operation from the previous event has completed.Bounds: 2m, 1d. Default: 2m.

func (BasicAutoscalingAlgorithmOutput) ElementType

func (BasicAutoscalingAlgorithmOutput) SparkStandaloneConfig added in v0.9.0

Optional. Spark Standalone autoscaling configuration

func (BasicAutoscalingAlgorithmOutput) ToBasicAutoscalingAlgorithmOutput

func (o BasicAutoscalingAlgorithmOutput) ToBasicAutoscalingAlgorithmOutput() BasicAutoscalingAlgorithmOutput

func (BasicAutoscalingAlgorithmOutput) ToBasicAutoscalingAlgorithmOutputWithContext

func (o BasicAutoscalingAlgorithmOutput) ToBasicAutoscalingAlgorithmOutputWithContext(ctx context.Context) BasicAutoscalingAlgorithmOutput

func (BasicAutoscalingAlgorithmOutput) ToBasicAutoscalingAlgorithmPtrOutput

func (o BasicAutoscalingAlgorithmOutput) ToBasicAutoscalingAlgorithmPtrOutput() BasicAutoscalingAlgorithmPtrOutput

func (BasicAutoscalingAlgorithmOutput) ToBasicAutoscalingAlgorithmPtrOutputWithContext

func (o BasicAutoscalingAlgorithmOutput) ToBasicAutoscalingAlgorithmPtrOutputWithContext(ctx context.Context) BasicAutoscalingAlgorithmPtrOutput

func (BasicAutoscalingAlgorithmOutput) YarnConfig

Optional. YARN autoscaling configuration.

type BasicAutoscalingAlgorithmPtrInput

type BasicAutoscalingAlgorithmPtrInput interface {
	pulumi.Input

	ToBasicAutoscalingAlgorithmPtrOutput() BasicAutoscalingAlgorithmPtrOutput
	ToBasicAutoscalingAlgorithmPtrOutputWithContext(context.Context) BasicAutoscalingAlgorithmPtrOutput
}

BasicAutoscalingAlgorithmPtrInput is an input type that accepts BasicAutoscalingAlgorithmArgs, BasicAutoscalingAlgorithmPtr and BasicAutoscalingAlgorithmPtrOutput values. You can construct a concrete instance of `BasicAutoscalingAlgorithmPtrInput` via:

        BasicAutoscalingAlgorithmArgs{...}

or:

        nil

type BasicAutoscalingAlgorithmPtrOutput

type BasicAutoscalingAlgorithmPtrOutput struct{ *pulumi.OutputState }

func (BasicAutoscalingAlgorithmPtrOutput) CooldownPeriod

Optional. Duration between scaling events. A scaling period starts after the update operation from the previous event has completed.Bounds: 2m, 1d. Default: 2m.

func (BasicAutoscalingAlgorithmPtrOutput) Elem

func (BasicAutoscalingAlgorithmPtrOutput) ElementType

func (BasicAutoscalingAlgorithmPtrOutput) SparkStandaloneConfig added in v0.9.0

Optional. Spark Standalone autoscaling configuration

func (BasicAutoscalingAlgorithmPtrOutput) ToBasicAutoscalingAlgorithmPtrOutput

func (o BasicAutoscalingAlgorithmPtrOutput) ToBasicAutoscalingAlgorithmPtrOutput() BasicAutoscalingAlgorithmPtrOutput

func (BasicAutoscalingAlgorithmPtrOutput) ToBasicAutoscalingAlgorithmPtrOutputWithContext

func (o BasicAutoscalingAlgorithmPtrOutput) ToBasicAutoscalingAlgorithmPtrOutputWithContext(ctx context.Context) BasicAutoscalingAlgorithmPtrOutput

func (BasicAutoscalingAlgorithmPtrOutput) YarnConfig

Optional. YARN autoscaling configuration.

type BasicAutoscalingAlgorithmResponse

type BasicAutoscalingAlgorithmResponse struct {
	// Optional. Duration between scaling events. A scaling period starts after the update operation from the previous event has completed.Bounds: 2m, 1d. Default: 2m.
	CooldownPeriod string `pulumi:"cooldownPeriod"`
	// Optional. Spark Standalone autoscaling configuration
	SparkStandaloneConfig SparkStandaloneAutoscalingConfigResponse `pulumi:"sparkStandaloneConfig"`
	// Optional. YARN autoscaling configuration.
	YarnConfig BasicYarnAutoscalingConfigResponse `pulumi:"yarnConfig"`
}

Basic algorithm for autoscaling.

type BasicAutoscalingAlgorithmResponseOutput

type BasicAutoscalingAlgorithmResponseOutput struct{ *pulumi.OutputState }

Basic algorithm for autoscaling.

func (BasicAutoscalingAlgorithmResponseOutput) CooldownPeriod

Optional. Duration between scaling events. A scaling period starts after the update operation from the previous event has completed.Bounds: 2m, 1d. Default: 2m.

func (BasicAutoscalingAlgorithmResponseOutput) ElementType

func (BasicAutoscalingAlgorithmResponseOutput) SparkStandaloneConfig added in v0.9.0

Optional. Spark Standalone autoscaling configuration

func (BasicAutoscalingAlgorithmResponseOutput) ToBasicAutoscalingAlgorithmResponseOutput

func (o BasicAutoscalingAlgorithmResponseOutput) ToBasicAutoscalingAlgorithmResponseOutput() BasicAutoscalingAlgorithmResponseOutput

func (BasicAutoscalingAlgorithmResponseOutput) ToBasicAutoscalingAlgorithmResponseOutputWithContext

func (o BasicAutoscalingAlgorithmResponseOutput) ToBasicAutoscalingAlgorithmResponseOutputWithContext(ctx context.Context) BasicAutoscalingAlgorithmResponseOutput

func (BasicAutoscalingAlgorithmResponseOutput) YarnConfig

Optional. YARN autoscaling configuration.

type BasicYarnAutoscalingConfig

type BasicYarnAutoscalingConfig struct {
	// Timeout for YARN graceful decommissioning of Node Managers. Specifies the duration to wait for jobs to complete before forcefully removing workers (and potentially interrupting jobs). Only applicable to downscaling operations.Bounds: 0s, 1d.
	GracefulDecommissionTimeout string `pulumi:"gracefulDecommissionTimeout"`
	// Fraction of average YARN pending memory in the last cooldown period for which to remove workers. A scale-down factor of 1 will result in scaling down so that there is no available memory remaining after the update (more aggressive scaling). A scale-down factor of 0 disables removing workers, which can be beneficial for autoscaling a single job. See How autoscaling works (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) for more information.Bounds: 0.0, 1.0.
	ScaleDownFactor float64 `pulumi:"scaleDownFactor"`
	// Optional. Minimum scale-down threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2 worker scale-down for the cluster to scale. A threshold of 0 means the autoscaler will scale down on any recommended change.Bounds: 0.0, 1.0. Default: 0.0.
	ScaleDownMinWorkerFraction *float64 `pulumi:"scaleDownMinWorkerFraction"`
	// Fraction of average YARN pending memory in the last cooldown period for which to add workers. A scale-up factor of 1.0 will result in scaling up so that there is no pending memory remaining after the update (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling). See How autoscaling works (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) for more information.Bounds: 0.0, 1.0.
	ScaleUpFactor float64 `pulumi:"scaleUpFactor"`
	// Optional. Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change.Bounds: 0.0, 1.0. Default: 0.0.
	ScaleUpMinWorkerFraction *float64 `pulumi:"scaleUpMinWorkerFraction"`
}

Basic autoscaling configurations for YARN.

type BasicYarnAutoscalingConfigArgs

type BasicYarnAutoscalingConfigArgs struct {
	// Timeout for YARN graceful decommissioning of Node Managers. Specifies the duration to wait for jobs to complete before forcefully removing workers (and potentially interrupting jobs). Only applicable to downscaling operations.Bounds: 0s, 1d.
	GracefulDecommissionTimeout pulumi.StringInput `pulumi:"gracefulDecommissionTimeout"`
	// Fraction of average YARN pending memory in the last cooldown period for which to remove workers. A scale-down factor of 1 will result in scaling down so that there is no available memory remaining after the update (more aggressive scaling). A scale-down factor of 0 disables removing workers, which can be beneficial for autoscaling a single job. See How autoscaling works (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) for more information.Bounds: 0.0, 1.0.
	ScaleDownFactor pulumi.Float64Input `pulumi:"scaleDownFactor"`
	// Optional. Minimum scale-down threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2 worker scale-down for the cluster to scale. A threshold of 0 means the autoscaler will scale down on any recommended change.Bounds: 0.0, 1.0. Default: 0.0.
	ScaleDownMinWorkerFraction pulumi.Float64PtrInput `pulumi:"scaleDownMinWorkerFraction"`
	// Fraction of average YARN pending memory in the last cooldown period for which to add workers. A scale-up factor of 1.0 will result in scaling up so that there is no pending memory remaining after the update (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling). See How autoscaling works (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) for more information.Bounds: 0.0, 1.0.
	ScaleUpFactor pulumi.Float64Input `pulumi:"scaleUpFactor"`
	// Optional. Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change.Bounds: 0.0, 1.0. Default: 0.0.
	ScaleUpMinWorkerFraction pulumi.Float64PtrInput `pulumi:"scaleUpMinWorkerFraction"`
}

Basic autoscaling configurations for YARN.

func (BasicYarnAutoscalingConfigArgs) ElementType

func (BasicYarnAutoscalingConfigArgs) ToBasicYarnAutoscalingConfigOutput

func (i BasicYarnAutoscalingConfigArgs) ToBasicYarnAutoscalingConfigOutput() BasicYarnAutoscalingConfigOutput

func (BasicYarnAutoscalingConfigArgs) ToBasicYarnAutoscalingConfigOutputWithContext

func (i BasicYarnAutoscalingConfigArgs) ToBasicYarnAutoscalingConfigOutputWithContext(ctx context.Context) BasicYarnAutoscalingConfigOutput

func (BasicYarnAutoscalingConfigArgs) ToBasicYarnAutoscalingConfigPtrOutput

func (i BasicYarnAutoscalingConfigArgs) ToBasicYarnAutoscalingConfigPtrOutput() BasicYarnAutoscalingConfigPtrOutput

func (BasicYarnAutoscalingConfigArgs) ToBasicYarnAutoscalingConfigPtrOutputWithContext

func (i BasicYarnAutoscalingConfigArgs) ToBasicYarnAutoscalingConfigPtrOutputWithContext(ctx context.Context) BasicYarnAutoscalingConfigPtrOutput

type BasicYarnAutoscalingConfigInput

type BasicYarnAutoscalingConfigInput interface {
	pulumi.Input

	ToBasicYarnAutoscalingConfigOutput() BasicYarnAutoscalingConfigOutput
	ToBasicYarnAutoscalingConfigOutputWithContext(context.Context) BasicYarnAutoscalingConfigOutput
}

BasicYarnAutoscalingConfigInput is an input type that accepts BasicYarnAutoscalingConfigArgs and BasicYarnAutoscalingConfigOutput values. You can construct a concrete instance of `BasicYarnAutoscalingConfigInput` via:

BasicYarnAutoscalingConfigArgs{...}

type BasicYarnAutoscalingConfigOutput

type BasicYarnAutoscalingConfigOutput struct{ *pulumi.OutputState }

Basic autoscaling configurations for YARN.

func (BasicYarnAutoscalingConfigOutput) ElementType

func (BasicYarnAutoscalingConfigOutput) GracefulDecommissionTimeout

func (o BasicYarnAutoscalingConfigOutput) GracefulDecommissionTimeout() pulumi.StringOutput

Timeout for YARN graceful decommissioning of Node Managers. Specifies the duration to wait for jobs to complete before forcefully removing workers (and potentially interrupting jobs). Only applicable to downscaling operations.Bounds: 0s, 1d.

func (BasicYarnAutoscalingConfigOutput) ScaleDownFactor

Fraction of average YARN pending memory in the last cooldown period for which to remove workers. A scale-down factor of 1 will result in scaling down so that there is no available memory remaining after the update (more aggressive scaling). A scale-down factor of 0 disables removing workers, which can be beneficial for autoscaling a single job. See How autoscaling works (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) for more information.Bounds: 0.0, 1.0.

func (BasicYarnAutoscalingConfigOutput) ScaleDownMinWorkerFraction

func (o BasicYarnAutoscalingConfigOutput) ScaleDownMinWorkerFraction() pulumi.Float64PtrOutput

Optional. Minimum scale-down threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2 worker scale-down for the cluster to scale. A threshold of 0 means the autoscaler will scale down on any recommended change.Bounds: 0.0, 1.0. Default: 0.0.

func (BasicYarnAutoscalingConfigOutput) ScaleUpFactor

Fraction of average YARN pending memory in the last cooldown period for which to add workers. A scale-up factor of 1.0 will result in scaling up so that there is no pending memory remaining after the update (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling). See How autoscaling works (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) for more information.Bounds: 0.0, 1.0.

func (BasicYarnAutoscalingConfigOutput) ScaleUpMinWorkerFraction

func (o BasicYarnAutoscalingConfigOutput) ScaleUpMinWorkerFraction() pulumi.Float64PtrOutput

Optional. Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change.Bounds: 0.0, 1.0. Default: 0.0.

func (BasicYarnAutoscalingConfigOutput) ToBasicYarnAutoscalingConfigOutput

func (o BasicYarnAutoscalingConfigOutput) ToBasicYarnAutoscalingConfigOutput() BasicYarnAutoscalingConfigOutput

func (BasicYarnAutoscalingConfigOutput) ToBasicYarnAutoscalingConfigOutputWithContext

func (o BasicYarnAutoscalingConfigOutput) ToBasicYarnAutoscalingConfigOutputWithContext(ctx context.Context) BasicYarnAutoscalingConfigOutput

func (BasicYarnAutoscalingConfigOutput) ToBasicYarnAutoscalingConfigPtrOutput

func (o BasicYarnAutoscalingConfigOutput) ToBasicYarnAutoscalingConfigPtrOutput() BasicYarnAutoscalingConfigPtrOutput

func (BasicYarnAutoscalingConfigOutput) ToBasicYarnAutoscalingConfigPtrOutputWithContext

func (o BasicYarnAutoscalingConfigOutput) ToBasicYarnAutoscalingConfigPtrOutputWithContext(ctx context.Context) BasicYarnAutoscalingConfigPtrOutput

type BasicYarnAutoscalingConfigPtrInput

type BasicYarnAutoscalingConfigPtrInput interface {
	pulumi.Input

	ToBasicYarnAutoscalingConfigPtrOutput() BasicYarnAutoscalingConfigPtrOutput
	ToBasicYarnAutoscalingConfigPtrOutputWithContext(context.Context) BasicYarnAutoscalingConfigPtrOutput
}

BasicYarnAutoscalingConfigPtrInput is an input type that accepts BasicYarnAutoscalingConfigArgs, BasicYarnAutoscalingConfigPtr and BasicYarnAutoscalingConfigPtrOutput values. You can construct a concrete instance of `BasicYarnAutoscalingConfigPtrInput` via:

        BasicYarnAutoscalingConfigArgs{...}

or:

        nil

type BasicYarnAutoscalingConfigPtrOutput

type BasicYarnAutoscalingConfigPtrOutput struct{ *pulumi.OutputState }

func (BasicYarnAutoscalingConfigPtrOutput) Elem

func (BasicYarnAutoscalingConfigPtrOutput) ElementType

func (BasicYarnAutoscalingConfigPtrOutput) GracefulDecommissionTimeout

func (o BasicYarnAutoscalingConfigPtrOutput) GracefulDecommissionTimeout() pulumi.StringPtrOutput

Timeout for YARN graceful decommissioning of Node Managers. Specifies the duration to wait for jobs to complete before forcefully removing workers (and potentially interrupting jobs). Only applicable to downscaling operations.Bounds: 0s, 1d.

func (BasicYarnAutoscalingConfigPtrOutput) ScaleDownFactor

Fraction of average YARN pending memory in the last cooldown period for which to remove workers. A scale-down factor of 1 will result in scaling down so that there is no available memory remaining after the update (more aggressive scaling). A scale-down factor of 0 disables removing workers, which can be beneficial for autoscaling a single job. See How autoscaling works (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) for more information.Bounds: 0.0, 1.0.

func (BasicYarnAutoscalingConfigPtrOutput) ScaleDownMinWorkerFraction

func (o BasicYarnAutoscalingConfigPtrOutput) ScaleDownMinWorkerFraction() pulumi.Float64PtrOutput

Optional. Minimum scale-down threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2 worker scale-down for the cluster to scale. A threshold of 0 means the autoscaler will scale down on any recommended change.Bounds: 0.0, 1.0. Default: 0.0.

func (BasicYarnAutoscalingConfigPtrOutput) ScaleUpFactor

Fraction of average YARN pending memory in the last cooldown period for which to add workers. A scale-up factor of 1.0 will result in scaling up so that there is no pending memory remaining after the update (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling). See How autoscaling works (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) for more information.Bounds: 0.0, 1.0.

func (BasicYarnAutoscalingConfigPtrOutput) ScaleUpMinWorkerFraction

func (o BasicYarnAutoscalingConfigPtrOutput) ScaleUpMinWorkerFraction() pulumi.Float64PtrOutput

Optional. Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change.Bounds: 0.0, 1.0. Default: 0.0.

func (BasicYarnAutoscalingConfigPtrOutput) ToBasicYarnAutoscalingConfigPtrOutput

func (o BasicYarnAutoscalingConfigPtrOutput) ToBasicYarnAutoscalingConfigPtrOutput() BasicYarnAutoscalingConfigPtrOutput

func (BasicYarnAutoscalingConfigPtrOutput) ToBasicYarnAutoscalingConfigPtrOutputWithContext

func (o BasicYarnAutoscalingConfigPtrOutput) ToBasicYarnAutoscalingConfigPtrOutputWithContext(ctx context.Context) BasicYarnAutoscalingConfigPtrOutput

type BasicYarnAutoscalingConfigResponse

type BasicYarnAutoscalingConfigResponse struct {
	// Timeout for YARN graceful decommissioning of Node Managers. Specifies the duration to wait for jobs to complete before forcefully removing workers (and potentially interrupting jobs). Only applicable to downscaling operations.Bounds: 0s, 1d.
	GracefulDecommissionTimeout string `pulumi:"gracefulDecommissionTimeout"`
	// Fraction of average YARN pending memory in the last cooldown period for which to remove workers. A scale-down factor of 1 will result in scaling down so that there is no available memory remaining after the update (more aggressive scaling). A scale-down factor of 0 disables removing workers, which can be beneficial for autoscaling a single job. See How autoscaling works (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) for more information.Bounds: 0.0, 1.0.
	ScaleDownFactor float64 `pulumi:"scaleDownFactor"`
	// Optional. Minimum scale-down threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2 worker scale-down for the cluster to scale. A threshold of 0 means the autoscaler will scale down on any recommended change.Bounds: 0.0, 1.0. Default: 0.0.
	ScaleDownMinWorkerFraction float64 `pulumi:"scaleDownMinWorkerFraction"`
	// Fraction of average YARN pending memory in the last cooldown period for which to add workers. A scale-up factor of 1.0 will result in scaling up so that there is no pending memory remaining after the update (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling). See How autoscaling works (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) for more information.Bounds: 0.0, 1.0.
	ScaleUpFactor float64 `pulumi:"scaleUpFactor"`
	// Optional. Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change.Bounds: 0.0, 1.0. Default: 0.0.
	ScaleUpMinWorkerFraction float64 `pulumi:"scaleUpMinWorkerFraction"`
}

Basic autoscaling configurations for YARN.

type BasicYarnAutoscalingConfigResponseOutput

type BasicYarnAutoscalingConfigResponseOutput struct{ *pulumi.OutputState }

Basic autoscaling configurations for YARN.

func (BasicYarnAutoscalingConfigResponseOutput) ElementType

func (BasicYarnAutoscalingConfigResponseOutput) GracefulDecommissionTimeout

func (o BasicYarnAutoscalingConfigResponseOutput) GracefulDecommissionTimeout() pulumi.StringOutput

Timeout for YARN graceful decommissioning of Node Managers. Specifies the duration to wait for jobs to complete before forcefully removing workers (and potentially interrupting jobs). Only applicable to downscaling operations.Bounds: 0s, 1d.

func (BasicYarnAutoscalingConfigResponseOutput) ScaleDownFactor

Fraction of average YARN pending memory in the last cooldown period for which to remove workers. A scale-down factor of 1 will result in scaling down so that there is no available memory remaining after the update (more aggressive scaling). A scale-down factor of 0 disables removing workers, which can be beneficial for autoscaling a single job. See How autoscaling works (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) for more information.Bounds: 0.0, 1.0.

func (BasicYarnAutoscalingConfigResponseOutput) ScaleDownMinWorkerFraction

func (o BasicYarnAutoscalingConfigResponseOutput) ScaleDownMinWorkerFraction() pulumi.Float64Output

Optional. Minimum scale-down threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2 worker scale-down for the cluster to scale. A threshold of 0 means the autoscaler will scale down on any recommended change.Bounds: 0.0, 1.0. Default: 0.0.

func (BasicYarnAutoscalingConfigResponseOutput) ScaleUpFactor

Fraction of average YARN pending memory in the last cooldown period for which to add workers. A scale-up factor of 1.0 will result in scaling up so that there is no pending memory remaining after the update (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling). See How autoscaling works (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) for more information.Bounds: 0.0, 1.0.

func (BasicYarnAutoscalingConfigResponseOutput) ScaleUpMinWorkerFraction

func (o BasicYarnAutoscalingConfigResponseOutput) ScaleUpMinWorkerFraction() pulumi.Float64Output

Optional. Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change.Bounds: 0.0, 1.0. Default: 0.0.

func (BasicYarnAutoscalingConfigResponseOutput) ToBasicYarnAutoscalingConfigResponseOutput

func (o BasicYarnAutoscalingConfigResponseOutput) ToBasicYarnAutoscalingConfigResponseOutput() BasicYarnAutoscalingConfigResponseOutput

func (BasicYarnAutoscalingConfigResponseOutput) ToBasicYarnAutoscalingConfigResponseOutputWithContext

func (o BasicYarnAutoscalingConfigResponseOutput) ToBasicYarnAutoscalingConfigResponseOutputWithContext(ctx context.Context) BasicYarnAutoscalingConfigResponseOutput

type Batch added in v0.12.0

type Batch struct {
	pulumi.CustomResourceState

	// Optional. The ID to use for the batch, which will become the final component of the batch's resource name. This value must be 4-63 characters. Valid characters are /[a-z][0-9]-/.
	BatchId pulumi.StringPtrOutput `pulumi:"batchId"`
	// The time when the batch was created.
	CreateTime pulumi.StringOutput `pulumi:"createTime"`
	// The email address of the user who created the batch.
	Creator pulumi.StringOutput `pulumi:"creator"`
	// Optional. Environment configuration for the batch execution.
	EnvironmentConfig EnvironmentConfigResponseOutput `pulumi:"environmentConfig"`
	// Optional. The labels to associate with this batch. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a batch.
	Labels   pulumi.StringMapOutput `pulumi:"labels"`
	Location pulumi.StringOutput    `pulumi:"location"`
	// The resource name of the batch.
	Name pulumi.StringOutput `pulumi:"name"`
	// The resource name of the operation associated with this batch.
	Operation pulumi.StringOutput `pulumi:"operation"`
	Project   pulumi.StringOutput `pulumi:"project"`
	// Optional. PySpark batch config.
	PysparkBatch PySparkBatchResponseOutput `pulumi:"pysparkBatch"`
	// Optional. A unique ID used to identify the request. If the service receives two CreateBatchRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateBatchRequest)s with the same request_id, the second request is ignored and the Operation that corresponds to the first Batch created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The value must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.
	RequestId pulumi.StringPtrOutput `pulumi:"requestId"`
	// Optional. Runtime configuration for the batch execution.
	RuntimeConfig RuntimeConfigResponseOutput `pulumi:"runtimeConfig"`
	// Runtime information about batch execution.
	RuntimeInfo RuntimeInfoResponseOutput `pulumi:"runtimeInfo"`
	// Optional. Spark batch config.
	SparkBatch SparkBatchResponseOutput `pulumi:"sparkBatch"`
	// Optional. SparkR batch config.
	SparkRBatch SparkRBatchResponseOutput `pulumi:"sparkRBatch"`
	// Optional. SparkSql batch config.
	SparkSqlBatch SparkSqlBatchResponseOutput `pulumi:"sparkSqlBatch"`
	// The state of the batch.
	State pulumi.StringOutput `pulumi:"state"`
	// Historical state information for the batch.
	StateHistory StateHistoryResponseArrayOutput `pulumi:"stateHistory"`
	// Batch state details, such as a failure description if the state is FAILED.
	StateMessage pulumi.StringOutput `pulumi:"stateMessage"`
	// The time when the batch entered a current state.
	StateTime pulumi.StringOutput `pulumi:"stateTime"`
	// A batch UUID (Universally Unique Identifier). The service generates this value when it creates the batch.
	Uuid pulumi.StringOutput `pulumi:"uuid"`
}

Creates a batch workload that executes asynchronously. Auto-naming is currently not supported for this resource.

func GetBatch added in v0.12.0

func GetBatch(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *BatchState, opts ...pulumi.ResourceOption) (*Batch, error)

GetBatch gets an existing Batch resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewBatch added in v0.12.0

func NewBatch(ctx *pulumi.Context,
	name string, args *BatchArgs, opts ...pulumi.ResourceOption) (*Batch, error)

NewBatch registers a new resource with the given unique name, arguments, and options.
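
For illustration, a minimal sketch of registering a serverless PySpark batch with NewBatch. The module path and the MainPythonFileUri field of PySparkBatchArgs mirror the Dataproc API and are assumptions; the bucket, region, and batch ID are placeholders.

package main

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// BatchId becomes the final component of the batch's resource name.
		_, err := dataproc.NewBatch(ctx, "example-batch", &dataproc.BatchArgs{
			BatchId:  pulumi.String("example-batch-0001"),
			Location: pulumi.String("us-central1"),
			PysparkBatch: dataproc.PySparkBatchArgs{
				// Assumed field name: the main driver script in Cloud Storage.
				MainPythonFileUri: pulumi.String("gs://my-bucket/jobs/word_count.py"),
			},
		})
		return err
	})
}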

func (*Batch) ElementType added in v0.12.0

func (*Batch) ElementType() reflect.Type

func (*Batch) ToBatchOutput added in v0.12.0

func (i *Batch) ToBatchOutput() BatchOutput

func (*Batch) ToBatchOutputWithContext added in v0.12.0

func (i *Batch) ToBatchOutputWithContext(ctx context.Context) BatchOutput

type BatchArgs added in v0.12.0

type BatchArgs struct {
	// Optional. The ID to use for the batch, which will become the final component of the batch's resource name. This value must be 4-63 characters. Valid characters are /[a-z][0-9]-/.
	BatchId pulumi.StringPtrInput
	// Optional. Environment configuration for the batch execution.
	EnvironmentConfig EnvironmentConfigPtrInput
	// Optional. The labels to associate with this batch. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a batch.
	Labels   pulumi.StringMapInput
	Location pulumi.StringPtrInput
	Project  pulumi.StringPtrInput
	// Optional. PySpark batch config.
	PysparkBatch PySparkBatchPtrInput
	// Optional. A unique ID used to identify the request. If the service receives two CreateBatchRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateBatchRequest)s with the same request_id, the second request is ignored and the Operation that corresponds to the first Batch created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The value must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.
	RequestId pulumi.StringPtrInput
	// Optional. Runtime configuration for the batch execution.
	RuntimeConfig RuntimeConfigPtrInput
	// Optional. Spark batch config.
	SparkBatch SparkBatchPtrInput
	// Optional. SparkR batch config.
	SparkRBatch SparkRBatchPtrInput
	// Optional. SparkSql batch config.
	SparkSqlBatch SparkSqlBatchPtrInput
}

The set of arguments for constructing a Batch resource.

func (BatchArgs) ElementType added in v0.12.0

func (BatchArgs) ElementType() reflect.Type

type BatchInput added in v0.12.0

type BatchInput interface {
	pulumi.Input

	ToBatchOutput() BatchOutput
	ToBatchOutputWithContext(ctx context.Context) BatchOutput
}

type BatchOutput added in v0.12.0

type BatchOutput struct{ *pulumi.OutputState }

func (BatchOutput) BatchId added in v0.21.0

func (o BatchOutput) BatchId() pulumi.StringPtrOutput

Optional. The ID to use for the batch, which will become the final component of the batch's resource name. This value must be 4-63 characters. Valid characters are /[a-z][0-9]-/.

func (BatchOutput) CreateTime added in v0.19.0

func (o BatchOutput) CreateTime() pulumi.StringOutput

The time when the batch was created.

func (BatchOutput) Creator added in v0.19.0

func (o BatchOutput) Creator() pulumi.StringOutput

The email address of the user who created the batch.

func (BatchOutput) ElementType added in v0.12.0

func (BatchOutput) ElementType() reflect.Type

func (BatchOutput) EnvironmentConfig added in v0.19.0

func (o BatchOutput) EnvironmentConfig() EnvironmentConfigResponseOutput

Optional. Environment configuration for the batch execution.

func (BatchOutput) Labels added in v0.19.0

func (o BatchOutput) Labels() pulumi.StringMapOutput

Optional. The labels to associate with this batch. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a batch.

func (BatchOutput) Location added in v0.21.0

func (o BatchOutput) Location() pulumi.StringOutput

func (BatchOutput) Name added in v0.19.0

func (o BatchOutput) Name() pulumi.StringOutput

The resource name of the batch.

func (BatchOutput) Operation added in v0.19.0

func (o BatchOutput) Operation() pulumi.StringOutput

The resource name of the operation associated with this batch.

func (BatchOutput) Project added in v0.21.0

func (o BatchOutput) Project() pulumi.StringOutput

func (BatchOutput) PysparkBatch added in v0.19.0

func (o BatchOutput) PysparkBatch() PySparkBatchResponseOutput

Optional. PySpark batch config.

func (BatchOutput) RequestId added in v0.21.0

func (o BatchOutput) RequestId() pulumi.StringPtrOutput

Optional. A unique ID used to identify the request. If the service receives two CreateBatchRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateBatchRequest)s with the same request_id, the second request is ignored and the Operation that corresponds to the first Batch created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The value must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.

func (BatchOutput) RuntimeConfig added in v0.19.0

func (o BatchOutput) RuntimeConfig() RuntimeConfigResponseOutput

Optional. Runtime configuration for the batch execution.

func (BatchOutput) RuntimeInfo added in v0.19.0

func (o BatchOutput) RuntimeInfo() RuntimeInfoResponseOutput

Runtime information about batch execution.

func (BatchOutput) SparkBatch added in v0.19.0

func (o BatchOutput) SparkBatch() SparkBatchResponseOutput

Optional. Spark batch config.

func (BatchOutput) SparkRBatch added in v0.19.0

func (o BatchOutput) SparkRBatch() SparkRBatchResponseOutput

Optional. SparkR batch config.

func (BatchOutput) SparkSqlBatch added in v0.19.0

func (o BatchOutput) SparkSqlBatch() SparkSqlBatchResponseOutput

Optional. SparkSql batch config.

func (BatchOutput) State added in v0.19.0

func (o BatchOutput) State() pulumi.StringOutput

The state of the batch.

func (BatchOutput) StateHistory added in v0.19.0

Historical state information for the batch.

func (BatchOutput) StateMessage added in v0.19.0

func (o BatchOutput) StateMessage() pulumi.StringOutput

Batch state details, such as a failure description if the state is FAILED.

func (BatchOutput) StateTime added in v0.19.0

func (o BatchOutput) StateTime() pulumi.StringOutput

The time when the batch entered a current state.

func (BatchOutput) ToBatchOutput added in v0.12.0

func (o BatchOutput) ToBatchOutput() BatchOutput

func (BatchOutput) ToBatchOutputWithContext added in v0.12.0

func (o BatchOutput) ToBatchOutputWithContext(ctx context.Context) BatchOutput

func (BatchOutput) Uuid added in v0.19.0

func (o BatchOutput) Uuid() pulumi.StringOutput

A batch UUID (Unique Universal Identifier). The service generates this value when it creates the batch.

type BatchState added in v0.12.0

type BatchState struct {
}

func (BatchState) ElementType added in v0.12.0

func (BatchState) ElementType() reflect.Type

type Binding

type Binding struct {
	// The condition that is associated with this binding. If the condition evaluates to true, then this binding applies to the current request. If the condition evaluates to false, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).
	Condition *Expr `pulumi:"condition"`
	// Specifies the principals requesting access for a Google Cloud resource. members can have the following values: allUsers: A special identifier that represents anyone who is on the internet; with or without a Google account. allAuthenticatedUsers: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. user:{emailid}: An email address that represents a specific Google account. For example, alice@example.com . serviceAccount:{emailid}: An email address that represents a Google service account. For example, my-other-app@appspot.gserviceaccount.com. serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]: An identifier for a Kubernetes service account (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, my-project.svc.id.goog[my-namespace/my-kubernetes-sa]. group:{emailid}: An email address that represents a Google group. For example, admins@example.com. domain:{domain}: The G Suite domain (primary) that represents all the users of that domain. For example, google.com or example.com. deleted:user:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a user that has been recently deleted. For example, alice@example.com?uid=123456789012345678901. If the user is recovered, this value reverts to user:{emailid} and the recovered user retains the role in the binding. deleted:serviceAccount:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901. If the service account is undeleted, this value reverts to serviceAccount:{emailid} and the undeleted service account retains the role in the binding. deleted:group:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, admins@example.com?uid=123456789012345678901. If the group is recovered, this value reverts to group:{emailid} and the recovered group retains the role in the binding.
	Members []string `pulumi:"members"`
	// Role that is assigned to the list of members, or principals. For example, roles/viewer, roles/editor, or roles/owner.
	Role *string `pulumi:"role"`
}

Associates members, or principals, with a role.

type BindingArgs

type BindingArgs struct {
	// The condition that is associated with this binding. If the condition evaluates to true, then this binding applies to the current request. If the condition evaluates to false, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).
	Condition ExprPtrInput `pulumi:"condition"`
	// Specifies the principals requesting access for a Google Cloud resource. members can have the following values: allUsers: A special identifier that represents anyone who is on the internet; with or without a Google account. allAuthenticatedUsers: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. user:{emailid}: An email address that represents a specific Google account. For example, alice@example.com . serviceAccount:{emailid}: An email address that represents a Google service account. For example, my-other-app@appspot.gserviceaccount.com. serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]: An identifier for a Kubernetes service account (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, my-project.svc.id.goog[my-namespace/my-kubernetes-sa]. group:{emailid}: An email address that represents a Google group. For example, admins@example.com. domain:{domain}: The G Suite domain (primary) that represents all the users of that domain. For example, google.com or example.com. deleted:user:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a user that has been recently deleted. For example, alice@example.com?uid=123456789012345678901. If the user is recovered, this value reverts to user:{emailid} and the recovered user retains the role in the binding. deleted:serviceAccount:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901. If the service account is undeleted, this value reverts to serviceAccount:{emailid} and the undeleted service account retains the role in the binding. deleted:group:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, admins@example.com?uid=123456789012345678901. If the group is recovered, this value reverts to group:{emailid} and the recovered group retains the role in the binding.
	Members pulumi.StringArrayInput `pulumi:"members"`
	// Role that is assigned to the list of members, or principals. For example, roles/viewer, roles/editor, or roles/owner.
	Role pulumi.StringPtrInput `pulumi:"role"`
}

Associates members, or principals, with a role.

func (BindingArgs) ElementType

func (BindingArgs) ElementType() reflect.Type

func (BindingArgs) ToBindingOutput

func (i BindingArgs) ToBindingOutput() BindingOutput

func (BindingArgs) ToBindingOutputWithContext

func (i BindingArgs) ToBindingOutputWithContext(ctx context.Context) BindingOutput

type BindingArray

type BindingArray []BindingInput

func (BindingArray) ElementType

func (BindingArray) ElementType() reflect.Type

func (BindingArray) ToBindingArrayOutput

func (i BindingArray) ToBindingArrayOutput() BindingArrayOutput

func (BindingArray) ToBindingArrayOutputWithContext

func (i BindingArray) ToBindingArrayOutputWithContext(ctx context.Context) BindingArrayOutput

type BindingArrayInput

type BindingArrayInput interface {
	pulumi.Input

	ToBindingArrayOutput() BindingArrayOutput
	ToBindingArrayOutputWithContext(context.Context) BindingArrayOutput
}

BindingArrayInput is an input type that accepts BindingArray and BindingArrayOutput values. You can construct a concrete instance of `BindingArrayInput` via:

BindingArray{ BindingArgs{...} }
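
For example, a minimal sketch of this construction pattern (the role and member values are placeholders, and `dataproc` stands for an import of this package):

bindings := dataproc.BindingArray{
	dataproc.BindingArgs{
		// Illustrative role and principal values only.
		Role: pulumi.String("roles/viewer"),
		Members: pulumi.StringArray{
			pulumi.String("user:alice@example.com"),
		},
	},
}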

type BindingArrayOutput

type BindingArrayOutput struct{ *pulumi.OutputState }

func (BindingArrayOutput) ElementType

func (BindingArrayOutput) ElementType() reflect.Type

func (BindingArrayOutput) Index

func (o BindingArrayOutput) Index(i pulumi.IntInput) BindingOutput

func (BindingArrayOutput) ToBindingArrayOutput

func (o BindingArrayOutput) ToBindingArrayOutput() BindingArrayOutput

func (BindingArrayOutput) ToBindingArrayOutputWithContext

func (o BindingArrayOutput) ToBindingArrayOutputWithContext(ctx context.Context) BindingArrayOutput

type BindingInput

type BindingInput interface {
	pulumi.Input

	ToBindingOutput() BindingOutput
	ToBindingOutputWithContext(context.Context) BindingOutput
}

BindingInput is an input type that accepts BindingArgs and BindingOutput values. You can construct a concrete instance of `BindingInput` via:

BindingArgs{...}

type BindingOutput

type BindingOutput struct{ *pulumi.OutputState }

Associates members, or principals, with a role.

func (BindingOutput) Condition

func (o BindingOutput) Condition() ExprPtrOutput

The condition that is associated with this binding. If the condition evaluates to true, then this binding applies to the current request. If the condition evaluates to false, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).

func (BindingOutput) ElementType

func (BindingOutput) ElementType() reflect.Type

func (BindingOutput) Members

func (o BindingOutput) Members() pulumi.StringArrayOutput

Specifies the principals requesting access for a Google Cloud resource. members can have the following values: allUsers: A special identifier that represents anyone who is on the internet; with or without a Google account. allAuthenticatedUsers: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. user:{emailid}: An email address that represents a specific Google account. For example, alice@example.com . serviceAccount:{emailid}: An email address that represents a Google service account. For example, my-other-app@appspot.gserviceaccount.com. serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]: An identifier for a Kubernetes service account (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, my-project.svc.id.goog[my-namespace/my-kubernetes-sa]. group:{emailid}: An email address that represents a Google group. For example, admins@example.com. domain:{domain}: The G Suite domain (primary) that represents all the users of that domain. For example, google.com or example.com. deleted:user:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a user that has been recently deleted. For example, alice@example.com?uid=123456789012345678901. If the user is recovered, this value reverts to user:{emailid} and the recovered user retains the role in the binding. deleted:serviceAccount:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901. If the service account is undeleted, this value reverts to serviceAccount:{emailid} and the undeleted service account retains the role in the binding. deleted:group:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, admins@example.com?uid=123456789012345678901. If the group is recovered, this value reverts to group:{emailid} and the recovered group retains the role in the binding.

func (BindingOutput) Role

func (o BindingOutput) Role() pulumi.StringPtrOutput

Role that is assigned to the list of members, or principals. For example, roles/viewer, roles/editor, or roles/owner.

func (BindingOutput) ToBindingOutput

func (o BindingOutput) ToBindingOutput() BindingOutput

func (BindingOutput) ToBindingOutputWithContext

func (o BindingOutput) ToBindingOutputWithContext(ctx context.Context) BindingOutput

type BindingResponse

type BindingResponse struct {
	// The condition that is associated with this binding. If the condition evaluates to true, then this binding applies to the current request. If the condition evaluates to false, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).
	Condition ExprResponse `pulumi:"condition"`
	// Specifies the principals requesting access for a Google Cloud resource. members can have the following values: allUsers: A special identifier that represents anyone who is on the internet; with or without a Google account. allAuthenticatedUsers: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. user:{emailid}: An email address that represents a specific Google account. For example, alice@example.com . serviceAccount:{emailid}: An email address that represents a Google service account. For example, my-other-app@appspot.gserviceaccount.com. serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]: An identifier for a Kubernetes service account (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, my-project.svc.id.goog[my-namespace/my-kubernetes-sa]. group:{emailid}: An email address that represents a Google group. For example, admins@example.com. domain:{domain}: The G Suite domain (primary) that represents all the users of that domain. For example, google.com or example.com. deleted:user:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a user that has been recently deleted. For example, alice@example.com?uid=123456789012345678901. If the user is recovered, this value reverts to user:{emailid} and the recovered user retains the role in the binding. deleted:serviceAccount:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901. If the service account is undeleted, this value reverts to serviceAccount:{emailid} and the undeleted service account retains the role in the binding. deleted:group:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, admins@example.com?uid=123456789012345678901. If the group is recovered, this value reverts to group:{emailid} and the recovered group retains the role in the binding.
	Members []string `pulumi:"members"`
	// Role that is assigned to the list of members, or principals. For example, roles/viewer, roles/editor, or roles/owner.
	Role string `pulumi:"role"`
}

Associates members, or principals, with a role.

type BindingResponseArrayOutput

type BindingResponseArrayOutput struct{ *pulumi.OutputState }

func (BindingResponseArrayOutput) ElementType

func (BindingResponseArrayOutput) ElementType() reflect.Type

func (BindingResponseArrayOutput) Index

func (o BindingResponseArrayOutput) Index(i pulumi.IntInput) BindingResponseOutput

func (BindingResponseArrayOutput) ToBindingResponseArrayOutput

func (o BindingResponseArrayOutput) ToBindingResponseArrayOutput() BindingResponseArrayOutput

func (BindingResponseArrayOutput) ToBindingResponseArrayOutputWithContext

func (o BindingResponseArrayOutput) ToBindingResponseArrayOutputWithContext(ctx context.Context) BindingResponseArrayOutput

type BindingResponseOutput

type BindingResponseOutput struct{ *pulumi.OutputState }

Associates members, or principals, with a role.

func (BindingResponseOutput) Condition

func (o BindingResponseOutput) Condition() ExprResponseOutput

The condition that is associated with this binding. If the condition evaluates to true, then this binding applies to the current request. If the condition evaluates to false, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).

func (BindingResponseOutput) ElementType

func (BindingResponseOutput) ElementType() reflect.Type

func (BindingResponseOutput) Members

func (o BindingResponseOutput) Members() pulumi.StringArrayOutput

Specifies the principals requesting access for a Google Cloud resource. members can have the following values: allUsers: A special identifier that represents anyone who is on the internet; with or without a Google account. allAuthenticatedUsers: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. user:{emailid}: An email address that represents a specific Google account. For example, alice@example.com . serviceAccount:{emailid}: An email address that represents a Google service account. For example, my-other-app@appspot.gserviceaccount.com. serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]: An identifier for a Kubernetes service account (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, my-project.svc.id.goog[my-namespace/my-kubernetes-sa]. group:{emailid}: An email address that represents a Google group. For example, admins@example.com. domain:{domain}: The G Suite domain (primary) that represents all the users of that domain. For example, google.com or example.com. deleted:user:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a user that has been recently deleted. For example, alice@example.com?uid=123456789012345678901. If the user is recovered, this value reverts to user:{emailid} and the recovered user retains the role in the binding. deleted:serviceAccount:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901. If the service account is undeleted, this value reverts to serviceAccount:{emailid} and the undeleted service account retains the role in the binding. deleted:group:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, admins@example.com?uid=123456789012345678901. If the group is recovered, this value reverts to group:{emailid} and the recovered group retains the role in the binding.

func (BindingResponseOutput) Role

func (o BindingResponseOutput) Role() pulumi.StringOutput

Role that is assigned to the list of members, or principals. For example, roles/viewer, roles/editor, or roles/owner.

func (BindingResponseOutput) ToBindingResponseOutput

func (o BindingResponseOutput) ToBindingResponseOutput() BindingResponseOutput

func (BindingResponseOutput) ToBindingResponseOutputWithContext

func (o BindingResponseOutput) ToBindingResponseOutputWithContext(ctx context.Context) BindingResponseOutput

type Cluster added in v0.3.0

type Cluster struct {
	pulumi.CustomResourceState

	// Optional. Failure action when primary worker creation fails.
	ActionOnFailedPrimaryWorkers pulumi.StringPtrOutput `pulumi:"actionOnFailedPrimaryWorkers"`
	// The cluster name, which must be unique within a project. The name must start with a lowercase letter, and can contain up to 51 lowercase letters, numbers, and hyphens. It cannot end with a hyphen. The name of a deleted cluster can be reused.
	ClusterName pulumi.StringOutput `pulumi:"clusterName"`
	// A cluster UUID (Unique Universal Identifier). Dataproc generates this value when it creates the cluster.
	ClusterUuid pulumi.StringOutput `pulumi:"clusterUuid"`
	// Optional. The cluster config for a cluster of Compute Engine Instances. Note that Dataproc may set default values, and values may change when clusters are updated. Exactly one of ClusterConfig or VirtualClusterConfig must be specified.
	Config ClusterConfigResponseOutput `pulumi:"config"`
	// Optional. The labels to associate with this cluster. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a cluster.
	Labels pulumi.StringMapOutput `pulumi:"labels"`
	// Contains cluster daemon metrics such as HDFS and YARN stats. Beta Feature: This report is available for testing purposes only. It may be changed before final release.
	Metrics ClusterMetricsResponseOutput `pulumi:"metrics"`
	Project pulumi.StringOutput          `pulumi:"project"`
	Region  pulumi.StringOutput          `pulumi:"region"`
	// Optional. A unique ID used to identify the request. If the server receives two CreateClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned. It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier). The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.
	RequestId pulumi.StringPtrOutput `pulumi:"requestId"`
	// Cluster status.
	Status ClusterStatusResponseOutput `pulumi:"status"`
	// The previous cluster status.
	StatusHistory ClusterStatusResponseArrayOutput `pulumi:"statusHistory"`
	// Optional. The virtual cluster config is used when creating a Dataproc cluster that does not directly control the underlying compute resources, for example, when creating a Dataproc-on-GKE cluster (https://cloud.google.com/dataproc/docs/guides/dpgke/dataproc-gke-overview). Dataproc may set default values, and values may change when clusters are updated. Exactly one of config or virtual_cluster_config must be specified.
	VirtualClusterConfig VirtualClusterConfigResponseOutput `pulumi:"virtualClusterConfig"`
}

Creates a cluster in a project. The returned Operation.metadata will be ClusterOperationMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). Auto-naming is currently not supported for this resource.

func GetCluster added in v0.3.0

func GetCluster(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *ClusterState, opts ...pulumi.ResourceOption) (*Cluster, error)

GetCluster gets an existing Cluster resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).
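
For illustration, inside a pulumi.Run callback an existing cluster might be looked up as follows (a minimal sketch; the resource ID is a placeholder for the provider-assigned ID of the existing resource, and `dataproc` stands for an import of this package):

existing, err := dataproc.GetCluster(ctx, "imported-cluster",
	pulumi.ID("example-cluster-id"), // placeholder ID of the existing resource
	nil)
if err != nil {
	return err
}
_ = existing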

func NewCluster added in v0.3.0

func NewCluster(ctx *pulumi.Context,
	name string, args *ClusterArgs, opts ...pulumi.ResourceOption) (*Cluster, error)

NewCluster registers a new resource with the given unique name, arguments, and options.
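
A minimal sketch of registering a cluster. The import path, region, and bucket name are illustrative assumptions; only ClusterArgs fields documented below are used.

package main

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1" // assumed import path
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		cluster, err := dataproc.NewCluster(ctx, "example-cluster", &dataproc.ClusterArgs{
			ClusterName: pulumi.String("example-cluster"),
			Region:      pulumi.String("us-central1"),
			// Exactly one of Config or VirtualClusterConfig must be specified.
			Config: &dataproc.ClusterConfigArgs{
				// Bucket name only, not a gs://... URI.
				ConfigBucket: pulumi.String("my-dataproc-staging-bucket"),
			},
		})
		if err != nil {
			return err
		}
		ctx.Export("clusterUuid", cluster.ClusterUuid)
		return nil
	})
}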

func (*Cluster) ElementType added in v0.3.0

func (*Cluster) ElementType() reflect.Type

func (*Cluster) ToClusterOutput added in v0.3.0

func (i *Cluster) ToClusterOutput() ClusterOutput

func (*Cluster) ToClusterOutputWithContext added in v0.3.0

func (i *Cluster) ToClusterOutputWithContext(ctx context.Context) ClusterOutput

type ClusterArgs added in v0.3.0

type ClusterArgs struct {
	// Optional. Failure action when primary worker creation fails.
	ActionOnFailedPrimaryWorkers pulumi.StringPtrInput
	// The cluster name, which must be unique within a project. The name must start with a lowercase letter, and can contain up to 51 lowercase letters, numbers, and hyphens. It cannot end with a hyphen. The name of a deleted cluster can be reused.
	ClusterName pulumi.StringInput
	// Optional. The cluster config for a cluster of Compute Engine Instances. Note that Dataproc may set default values, and values may change when clusters are updated. Exactly one of ClusterConfig or VirtualClusterConfig must be specified.
	Config ClusterConfigPtrInput
	// Optional. The labels to associate with this cluster. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a cluster.
	Labels pulumi.StringMapInput
	// The Google Cloud Platform project ID that the cluster belongs to.
	Project pulumi.StringPtrInput
	Region  pulumi.StringInput
	// Optional. A unique ID used to identify the request. If the server receives two CreateClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned. It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier). The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.
	RequestId pulumi.StringPtrInput
	// Optional. The virtual cluster config is used when creating a Dataproc cluster that does not directly control the underlying compute resources, for example, when creating a Dataproc-on-GKE cluster (https://cloud.google.com/dataproc/docs/guides/dpgke/dataproc-gke-overview). Dataproc may set default values, and values may change when clusters are updated. Exactly one of config or virtual_cluster_config must be specified.
	VirtualClusterConfig VirtualClusterConfigPtrInput
}

The set of arguments for constructing a Cluster resource.

func (ClusterArgs) ElementType added in v0.3.0

func (ClusterArgs) ElementType() reflect.Type

type ClusterConfig

type ClusterConfig struct {
	// Optional. Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset.
	AutoscalingConfig *AutoscalingConfig `pulumi:"autoscalingConfig"`
	// Optional. The node group settings.
	AuxiliaryNodeGroups []AuxiliaryNodeGroup `pulumi:"auxiliaryNodeGroups"`
	// Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.
	ConfigBucket *string `pulumi:"configBucket"`
	// Optional. The config for Dataproc metrics.
	DataprocMetricConfig *DataprocMetricConfig `pulumi:"dataprocMetricConfig"`
	// Optional. Encryption settings for the cluster.
	EncryptionConfig *EncryptionConfig `pulumi:"encryptionConfig"`
	// Optional. Port/endpoint configuration for this cluster
	EndpointConfig *EndpointConfig `pulumi:"endpointConfig"`
	// Optional. The shared Compute Engine config settings for all instances in a cluster.
	GceClusterConfig *GceClusterConfig `pulumi:"gceClusterConfig"`
	// Optional. BETA. The Kubernetes Engine config for Dataproc clusters deployed to Kubernetes. These config settings are mutually exclusive with Compute Engine-based options, such as gce_cluster_config, master_config, worker_config, secondary_worker_config, and autoscaling_config.
	GkeClusterConfig *GkeClusterConfig `pulumi:"gkeClusterConfig"`
	// Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's role metadata to run an executable on a master or worker node, as shown below using curl (you can also use wget): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if [[ "${ROLE}" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi
	InitializationActions []NodeInitializationAction `pulumi:"initializationActions"`
	// Optional. Lifecycle setting for the cluster.
	LifecycleConfig *LifecycleConfig `pulumi:"lifecycleConfig"`
	// Optional. The Compute Engine config settings for the cluster's master instance.
	MasterConfig *InstanceGroupConfig `pulumi:"masterConfig"`
	// Optional. Metastore configuration.
	MetastoreConfig *MetastoreConfig `pulumi:"metastoreConfig"`
	// Optional. The Compute Engine config settings for a cluster's secondary worker instances
	SecondaryWorkerConfig *InstanceGroupConfig `pulumi:"secondaryWorkerConfig"`
	// Optional. Security settings for the cluster.
	SecurityConfig *SecurityConfig `pulumi:"securityConfig"`
	// Optional. The config settings for cluster software.
	SoftwareConfig *SoftwareConfig `pulumi:"softwareConfig"`
	// Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.
	TempBucket *string `pulumi:"tempBucket"`
	// Optional. The Compute Engine config settings for the cluster's worker instances.
	WorkerConfig *InstanceGroupConfig `pulumi:"workerConfig"`
}

The cluster config.

type ClusterConfigArgs

type ClusterConfigArgs struct {
	// Optional. Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset.
	AutoscalingConfig AutoscalingConfigPtrInput `pulumi:"autoscalingConfig"`
	// Optional. The node group settings.
	AuxiliaryNodeGroups AuxiliaryNodeGroupArrayInput `pulumi:"auxiliaryNodeGroups"`
	// Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.
	ConfigBucket pulumi.StringPtrInput `pulumi:"configBucket"`
	// Optional. The config for Dataproc metrics.
	DataprocMetricConfig DataprocMetricConfigPtrInput `pulumi:"dataprocMetricConfig"`
	// Optional. Encryption settings for the cluster.
	EncryptionConfig EncryptionConfigPtrInput `pulumi:"encryptionConfig"`
	// Optional. Port/endpoint configuration for this cluster
	EndpointConfig EndpointConfigPtrInput `pulumi:"endpointConfig"`
	// Optional. The shared Compute Engine config settings for all instances in a cluster.
	GceClusterConfig GceClusterConfigPtrInput `pulumi:"gceClusterConfig"`
	// Optional. BETA. The Kubernetes Engine config for Dataproc clusters deployed to Kubernetes. These config settings are mutually exclusive with Compute Engine-based options, such as gce_cluster_config, master_config, worker_config, secondary_worker_config, and autoscaling_config.
	GkeClusterConfig GkeClusterConfigPtrInput `pulumi:"gkeClusterConfig"`
	// Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's role metadata to run an executable on a master or worker node, as shown below using curl (you can also use wget): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if [[ "${ROLE}" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi
	InitializationActions NodeInitializationActionArrayInput `pulumi:"initializationActions"`
	// Optional. Lifecycle setting for the cluster.
	LifecycleConfig LifecycleConfigPtrInput `pulumi:"lifecycleConfig"`
	// Optional. The Compute Engine config settings for the cluster's master instance.
	MasterConfig InstanceGroupConfigPtrInput `pulumi:"masterConfig"`
	// Optional. Metastore configuration.
	MetastoreConfig MetastoreConfigPtrInput `pulumi:"metastoreConfig"`
	// Optional. The Compute Engine config settings for a cluster's secondary worker instances
	SecondaryWorkerConfig InstanceGroupConfigPtrInput `pulumi:"secondaryWorkerConfig"`
	// Optional. Security settings for the cluster.
	SecurityConfig SecurityConfigPtrInput `pulumi:"securityConfig"`
	// Optional. The config settings for cluster software.
	SoftwareConfig SoftwareConfigPtrInput `pulumi:"softwareConfig"`
	// Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.
	TempBucket pulumi.StringPtrInput `pulumi:"tempBucket"`
	// Optional. The Compute Engine config settings for the cluster's worker instances.
	WorkerConfig InstanceGroupConfigPtrInput `pulumi:"workerConfig"`
}

The cluster config.

func (ClusterConfigArgs) ElementType

func (ClusterConfigArgs) ElementType() reflect.Type

func (ClusterConfigArgs) ToClusterConfigOutput

func (i ClusterConfigArgs) ToClusterConfigOutput() ClusterConfigOutput

func (ClusterConfigArgs) ToClusterConfigOutputWithContext

func (i ClusterConfigArgs) ToClusterConfigOutputWithContext(ctx context.Context) ClusterConfigOutput

func (ClusterConfigArgs) ToClusterConfigPtrOutput

func (i ClusterConfigArgs) ToClusterConfigPtrOutput() ClusterConfigPtrOutput

func (ClusterConfigArgs) ToClusterConfigPtrOutputWithContext

func (i ClusterConfigArgs) ToClusterConfigPtrOutputWithContext(ctx context.Context) ClusterConfigPtrOutput

type ClusterConfigInput

type ClusterConfigInput interface {
	pulumi.Input

	ToClusterConfigOutput() ClusterConfigOutput
	ToClusterConfigOutputWithContext(context.Context) ClusterConfigOutput
}

ClusterConfigInput is an input type that accepts ClusterConfigArgs and ClusterConfigOutput values. You can construct a concrete instance of `ClusterConfigInput` via:

ClusterConfigArgs{...}

type ClusterConfigOutput

type ClusterConfigOutput struct{ *pulumi.OutputState }

The cluster config.

func (ClusterConfigOutput) AutoscalingConfig

func (o ClusterConfigOutput) AutoscalingConfig() AutoscalingConfigPtrOutput

Optional. Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset.

func (ClusterConfigOutput) AuxiliaryNodeGroups added in v0.28.0

func (o ClusterConfigOutput) AuxiliaryNodeGroups() AuxiliaryNodeGroupArrayOutput

Optional. The node group settings.

func (ClusterConfigOutput) ConfigBucket

func (o ClusterConfigOutput) ConfigBucket() pulumi.StringPtrOutput

Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.

func (ClusterConfigOutput) DataprocMetricConfig added in v0.15.0

func (o ClusterConfigOutput) DataprocMetricConfig() DataprocMetricConfigPtrOutput

Optional. The config for Dataproc metrics.

func (ClusterConfigOutput) ElementType

func (ClusterConfigOutput) ElementType() reflect.Type

func (ClusterConfigOutput) EncryptionConfig

func (o ClusterConfigOutput) EncryptionConfig() EncryptionConfigPtrOutput

Optional. Encryption settings for the cluster.

func (ClusterConfigOutput) EndpointConfig

func (o ClusterConfigOutput) EndpointConfig() EndpointConfigPtrOutput

Optional. Port/endpoint configuration for this cluster

func (ClusterConfigOutput) GceClusterConfig

func (o ClusterConfigOutput) GceClusterConfig() GceClusterConfigPtrOutput

Optional. The shared Compute Engine config settings for all instances in a cluster.

func (ClusterConfigOutput) GkeClusterConfig

func (o ClusterConfigOutput) GkeClusterConfig() GkeClusterConfigPtrOutput

Optional. BETA. The Kubernetes Engine config for Dataproc clusters deployed to Kubernetes. These config settings are mutually exclusive with Compute Engine-based options, such as gce_cluster_config, master_config, worker_config, secondary_worker_config, and autoscaling_config.

func (ClusterConfigOutput) InitializationActions

func (o ClusterConfigOutput) InitializationActions() NodeInitializationActionArrayOutput

Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's role metadata to run an executable on a master or worker node, as shown below using curl (you can also use wget): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if [[ "${ROLE}" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi

func (ClusterConfigOutput) LifecycleConfig

func (o ClusterConfigOutput) LifecycleConfig() LifecycleConfigPtrOutput

Optional. Lifecycle setting for the cluster.

func (ClusterConfigOutput) MasterConfig

func (o ClusterConfigOutput) MasterConfig() InstanceGroupConfigPtrOutput

Optional. The Compute Engine config settings for the cluster's master instance.

func (ClusterConfigOutput) MetastoreConfig

func (o ClusterConfigOutput) MetastoreConfig() MetastoreConfigPtrOutput

Optional. Metastore configuration.

func (ClusterConfigOutput) SecondaryWorkerConfig

func (o ClusterConfigOutput) SecondaryWorkerConfig() InstanceGroupConfigPtrOutput

Optional. The Compute Engine config settings for a cluster's secondary worker instances

func (ClusterConfigOutput) SecurityConfig

func (o ClusterConfigOutput) SecurityConfig() SecurityConfigPtrOutput

Optional. Security settings for the cluster.

func (ClusterConfigOutput) SoftwareConfig

func (o ClusterConfigOutput) SoftwareConfig() SoftwareConfigPtrOutput

Optional. The config settings for cluster software.

func (ClusterConfigOutput) TempBucket

func (o ClusterConfigOutput) TempBucket() pulumi.StringPtrOutput

Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.

func (ClusterConfigOutput) ToClusterConfigOutput

func (o ClusterConfigOutput) ToClusterConfigOutput() ClusterConfigOutput

func (ClusterConfigOutput) ToClusterConfigOutputWithContext

func (o ClusterConfigOutput) ToClusterConfigOutputWithContext(ctx context.Context) ClusterConfigOutput

func (ClusterConfigOutput) ToClusterConfigPtrOutput

func (o ClusterConfigOutput) ToClusterConfigPtrOutput() ClusterConfigPtrOutput

func (ClusterConfigOutput) ToClusterConfigPtrOutputWithContext

func (o ClusterConfigOutput) ToClusterConfigPtrOutputWithContext(ctx context.Context) ClusterConfigPtrOutput

func (ClusterConfigOutput) WorkerConfig

func (o ClusterConfigOutput) WorkerConfig() InstanceGroupConfigPtrOutput

Optional. The Compute Engine config settings for the cluster's worker instances.

type ClusterConfigPtrInput

type ClusterConfigPtrInput interface {
	pulumi.Input

	ToClusterConfigPtrOutput() ClusterConfigPtrOutput
	ToClusterConfigPtrOutputWithContext(context.Context) ClusterConfigPtrOutput
}

ClusterConfigPtrInput is an input type that accepts ClusterConfigArgs, ClusterConfigPtr and ClusterConfigPtrOutput values. You can construct a concrete instance of `ClusterConfigPtrInput` via:

        ClusterConfigArgs{...}

or:

        nil
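
For example, a minimal sketch of the accepted forms (the bucket name is a placeholder and `dataproc` stands for an import of this package):

// A concrete config value...
var cfg dataproc.ClusterConfigPtrInput = dataproc.ClusterConfigArgs{
	// Bucket name only, not a gs://... URI.
	TempBucket: pulumi.String("my-dataproc-temp-bucket"),
}

// ...or nil to leave the config unset.
cfg = nil
_ = cfg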

type ClusterConfigPtrOutput

type ClusterConfigPtrOutput struct{ *pulumi.OutputState }

func (ClusterConfigPtrOutput) AutoscalingConfig

func (o ClusterConfigPtrOutput) AutoscalingConfig() AutoscalingConfigPtrOutput

Optional. Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset.

func (ClusterConfigPtrOutput) AuxiliaryNodeGroups added in v0.28.0

func (o ClusterConfigPtrOutput) AuxiliaryNodeGroups() AuxiliaryNodeGroupArrayOutput

Optional. The node group settings.

func (ClusterConfigPtrOutput) ConfigBucket

Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.

func (ClusterConfigPtrOutput) DataprocMetricConfig added in v0.15.0

func (o ClusterConfigPtrOutput) DataprocMetricConfig() DataprocMetricConfigPtrOutput

Optional. The config for Dataproc metrics.

func (ClusterConfigPtrOutput) Elem

func (o ClusterConfigPtrOutput) Elem() ClusterConfigOutput

func (ClusterConfigPtrOutput) ElementType

func (ClusterConfigPtrOutput) ElementType() reflect.Type

func (ClusterConfigPtrOutput) EncryptionConfig

Optional. Encryption settings for the cluster.

func (ClusterConfigPtrOutput) EndpointConfig

Optional. Port/endpoint configuration for this cluster

func (ClusterConfigPtrOutput) GceClusterConfig

Optional. The shared Compute Engine config settings for all instances in a cluster.

func (ClusterConfigPtrOutput) GkeClusterConfig

Optional. BETA. The Kubernetes Engine config for Dataproc clusters deployed to Kubernetes. These config settings are mutually exclusive with Compute Engine-based options, such as gce_cluster_config, master_config, worker_config, secondary_worker_config, and autoscaling_config.

func (ClusterConfigPtrOutput) InitializationActions

Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's role metadata to run an executable on a master or worker node, as shown below using curl (you can also use wget): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if [[ "${ROLE}" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi

func (ClusterConfigPtrOutput) LifecycleConfig

Optional. Lifecycle setting for the cluster.

func (ClusterConfigPtrOutput) MasterConfig

Optional. The Compute Engine config settings for the cluster's master instance.

func (ClusterConfigPtrOutput) MetastoreConfig

Optional. Metastore configuration.

func (ClusterConfigPtrOutput) SecondaryWorkerConfig

func (o ClusterConfigPtrOutput) SecondaryWorkerConfig() InstanceGroupConfigPtrOutput

Optional. The Compute Engine config settings for a cluster's secondary worker instances

func (ClusterConfigPtrOutput) SecurityConfig

Optional. Security settings for the cluster.

func (ClusterConfigPtrOutput) SoftwareConfig

Optional. The config settings for cluster software.

func (ClusterConfigPtrOutput) TempBucket

Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.

func (ClusterConfigPtrOutput) ToClusterConfigPtrOutput

func (o ClusterConfigPtrOutput) ToClusterConfigPtrOutput() ClusterConfigPtrOutput

func (ClusterConfigPtrOutput) ToClusterConfigPtrOutputWithContext

func (o ClusterConfigPtrOutput) ToClusterConfigPtrOutputWithContext(ctx context.Context) ClusterConfigPtrOutput

func (ClusterConfigPtrOutput) WorkerConfig

Optional. The Compute Engine config settings for the cluster's worker instances.

type ClusterConfigResponse

type ClusterConfigResponse struct {
	// Optional. Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset.
	AutoscalingConfig AutoscalingConfigResponse `pulumi:"autoscalingConfig"`
	// Optional. The node group settings.
	AuxiliaryNodeGroups []AuxiliaryNodeGroupResponse `pulumi:"auxiliaryNodeGroups"`
	// Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.
	ConfigBucket string `pulumi:"configBucket"`
	// Optional. The config for Dataproc metrics.
	DataprocMetricConfig DataprocMetricConfigResponse `pulumi:"dataprocMetricConfig"`
	// Optional. Encryption settings for the cluster.
	EncryptionConfig EncryptionConfigResponse `pulumi:"encryptionConfig"`
	// Optional. Port/endpoint configuration for this cluster
	EndpointConfig EndpointConfigResponse `pulumi:"endpointConfig"`
	// Optional. The shared Compute Engine config settings for all instances in a cluster.
	GceClusterConfig GceClusterConfigResponse `pulumi:"gceClusterConfig"`
	// Optional. BETA. The Kubernetes Engine config for Dataproc clusters deployed to Kubernetes. These config settings are mutually exclusive with Compute Engine-based options, such as gce_cluster_config, master_config, worker_config, secondary_worker_config, and autoscaling_config.
	GkeClusterConfig GkeClusterConfigResponse `pulumi:"gkeClusterConfig"`
	// Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's role metadata to run an executable on a master or worker node, as shown below using curl (you can also use wget): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if [[ "${ROLE}" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi
	InitializationActions []NodeInitializationActionResponse `pulumi:"initializationActions"`
	// Optional. Lifecycle setting for the cluster.
	LifecycleConfig LifecycleConfigResponse `pulumi:"lifecycleConfig"`
	// Optional. The Compute Engine config settings for the cluster's master instance.
	MasterConfig InstanceGroupConfigResponse `pulumi:"masterConfig"`
	// Optional. Metastore configuration.
	MetastoreConfig MetastoreConfigResponse `pulumi:"metastoreConfig"`
	// Optional. The Compute Engine config settings for a cluster's secondary worker instances
	SecondaryWorkerConfig InstanceGroupConfigResponse `pulumi:"secondaryWorkerConfig"`
	// Optional. Security settings for the cluster.
	SecurityConfig SecurityConfigResponse `pulumi:"securityConfig"`
	// Optional. The config settings for cluster software.
	SoftwareConfig SoftwareConfigResponse `pulumi:"softwareConfig"`
	// Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.
	TempBucket string `pulumi:"tempBucket"`
	// Optional. The Compute Engine config settings for the cluster's worker instances.
	WorkerConfig InstanceGroupConfigResponse `pulumi:"workerConfig"`
}

The cluster config.

type ClusterConfigResponseOutput

type ClusterConfigResponseOutput struct{ *pulumi.OutputState }

The cluster config.

func (ClusterConfigResponseOutput) AutoscalingConfig

Optional. Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset.

func (ClusterConfigResponseOutput) AuxiliaryNodeGroups added in v0.28.0

Optional. The node group settings.

func (ClusterConfigResponseOutput) ConfigBucket

Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.

func (ClusterConfigResponseOutput) DataprocMetricConfig added in v0.15.0

Optional. The config for Dataproc metrics.

func (ClusterConfigResponseOutput) ElementType

func (ClusterConfigResponseOutput) ElementType() reflect.Type

func (ClusterConfigResponseOutput) EncryptionConfig

Optional. Encryption settings for the cluster.

func (ClusterConfigResponseOutput) EndpointConfig

Optional. Port/endpoint configuration for this cluster

func (ClusterConfigResponseOutput) GceClusterConfig

Optional. The shared Compute Engine config settings for all instances in a cluster.

func (ClusterConfigResponseOutput) GkeClusterConfig

Optional. BETA. The Kubernetes Engine config for Dataproc clusters deployed to Kubernetes. These config settings are mutually exclusive with Compute Engine-based options, such as gce_cluster_config, master_config, worker_config, secondary_worker_config, and autoscaling_config.

func (ClusterConfigResponseOutput) InitializationActions

Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's role metadata to run an executable on a master or worker node, as shown below using curl (you can also use wget): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if [[ "${ROLE}" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi

func (ClusterConfigResponseOutput) LifecycleConfig

Optional. Lifecycle setting for the cluster.

func (ClusterConfigResponseOutput) MasterConfig

Optional. The Compute Engine config settings for the cluster's master instance.

func (ClusterConfigResponseOutput) MetastoreConfig

Optional. Metastore configuration.

func (ClusterConfigResponseOutput) SecondaryWorkerConfig

Optional. The Compute Engine config settings for a cluster's secondary worker instances

func (ClusterConfigResponseOutput) SecurityConfig

Optional. Security settings for the cluster.

func (ClusterConfigResponseOutput) SoftwareConfig

Optional. The config settings for cluster software.

func (ClusterConfigResponseOutput) TempBucket

Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.

func (ClusterConfigResponseOutput) ToClusterConfigResponseOutput

func (o ClusterConfigResponseOutput) ToClusterConfigResponseOutput() ClusterConfigResponseOutput

func (ClusterConfigResponseOutput) ToClusterConfigResponseOutputWithContext

func (o ClusterConfigResponseOutput) ToClusterConfigResponseOutputWithContext(ctx context.Context) ClusterConfigResponseOutput

func (ClusterConfigResponseOutput) WorkerConfig

Optional. The Compute Engine config settings for the cluster's worker instances.

type ClusterInput added in v0.3.0

type ClusterInput interface {
	pulumi.Input

	ToClusterOutput() ClusterOutput
	ToClusterOutputWithContext(ctx context.Context) ClusterOutput
}

type ClusterMetricsResponse

type ClusterMetricsResponse struct {
	// The HDFS metrics.
	HdfsMetrics map[string]string `pulumi:"hdfsMetrics"`
	// YARN metrics.
	YarnMetrics map[string]string `pulumi:"yarnMetrics"`
}

Contains cluster daemon metrics, such as HDFS and YARN stats. Beta Feature: This report is available for testing purposes only. It may be changed before final release.

type ClusterMetricsResponseOutput

type ClusterMetricsResponseOutput struct{ *pulumi.OutputState }

Contains cluster daemon metrics, such as HDFS and YARN stats. Beta Feature: This report is available for testing purposes only. It may be changed before final release.

func (ClusterMetricsResponseOutput) ElementType

func (ClusterMetricsResponseOutput) ElementType() reflect.Type

func (ClusterMetricsResponseOutput) HdfsMetrics

func (o ClusterMetricsResponseOutput) HdfsMetrics() pulumi.StringMapOutput

The HDFS metrics.

func (ClusterMetricsResponseOutput) ToClusterMetricsResponseOutput

func (o ClusterMetricsResponseOutput) ToClusterMetricsResponseOutput() ClusterMetricsResponseOutput

func (ClusterMetricsResponseOutput) ToClusterMetricsResponseOutputWithContext

func (o ClusterMetricsResponseOutput) ToClusterMetricsResponseOutputWithContext(ctx context.Context) ClusterMetricsResponseOutput

func (ClusterMetricsResponseOutput) YarnMetrics

func (o ClusterMetricsResponseOutput) YarnMetrics() pulumi.StringMapOutput

YARN metrics.

type ClusterOutput added in v0.3.0

type ClusterOutput struct{ *pulumi.OutputState }

func (ClusterOutput) ActionOnFailedPrimaryWorkers added in v0.21.0

func (o ClusterOutput) ActionOnFailedPrimaryWorkers() pulumi.StringPtrOutput

Optional. Failure action when primary worker creation fails.

func (ClusterOutput) ClusterName added in v0.19.0

func (o ClusterOutput) ClusterName() pulumi.StringOutput

The cluster name, which must be unique within a project. The name must start with a lowercase letter, and can contain up to 51 lowercase letters, numbers, and hyphens. It cannot end with a hyphen. The name of a deleted cluster can be reused.

func (ClusterOutput) ClusterUuid added in v0.19.0

func (o ClusterOutput) ClusterUuid() pulumi.StringOutput

A cluster UUID (Unique Universal Identifier). Dataproc generates this value when it creates the cluster.

func (ClusterOutput) Config added in v0.19.0

Optional. The cluster config for a cluster of Compute Engine Instances. Note that Dataproc may set default values, and values may change when clusters are updated. Exactly one of ClusterConfig or VirtualClusterConfig must be specified.

func (ClusterOutput) ElementType added in v0.3.0

func (ClusterOutput) ElementType() reflect.Type

func (ClusterOutput) Labels added in v0.19.0

Optional. The labels to associate with this cluster. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a cluster.

func (ClusterOutput) Metrics added in v0.19.0

Contains cluster daemon metrics such as HDFS and YARN stats. Beta Feature: This report is available for testing purposes only. It may be changed before final release.

func (ClusterOutput) Project added in v0.19.0

func (o ClusterOutput) Project() pulumi.StringOutput

func (ClusterOutput) Region added in v0.21.0

func (o ClusterOutput) Region() pulumi.StringOutput

func (ClusterOutput) RequestId added in v0.21.0

func (o ClusterOutput) RequestId() pulumi.StringPtrOutput

Optional. A unique ID used to identify the request. If the server receives two CreateClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned. It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier). The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.

func (ClusterOutput) Status added in v0.19.0

Cluster status.

func (ClusterOutput) StatusHistory added in v0.19.0

The previous cluster status.

func (ClusterOutput) ToClusterOutput added in v0.3.0

func (o ClusterOutput) ToClusterOutput() ClusterOutput

func (ClusterOutput) ToClusterOutputWithContext added in v0.3.0

func (o ClusterOutput) ToClusterOutputWithContext(ctx context.Context) ClusterOutput

func (ClusterOutput) VirtualClusterConfig added in v0.19.0

func (o ClusterOutput) VirtualClusterConfig() VirtualClusterConfigResponseOutput

Optional. The virtual cluster config is used when creating a Dataproc cluster that does not directly control the underlying compute resources, for example, when creating a Dataproc-on-GKE cluster (https://cloud.google.com/dataproc/docs/guides/dpgke/dataproc-gke-overview). Dataproc may set default values, and values may change when clusters are updated. Exactly one of config or virtual_cluster_config must be specified.
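
A minimal end-to-end sketch of creating a cluster with this package; the import path and alias, the empty Config (Dataproc fills in defaults for unset config fields), and the name/region/UUID values are assumptions or placeholders rather than details taken from this page:

	package main

	import (
		dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1"
		"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
	)

	func main() {
		pulumi.Run(func(ctx *pulumi.Context) error {
			// Exactly one of Config or VirtualClusterConfig may be set; Config is used here.
			cluster, err := dataproc.NewCluster(ctx, "example", &dataproc.ClusterArgs{
				ClusterName: pulumi.String("example-cluster"), // lowercase, <= 51 chars, no trailing hyphen
				Region:      pulumi.String("us-central1"),
				RequestId:   pulumi.String("3f2504e0-4f89-41d3-9a0c-0305e82c3301"), // a UUID is recommended
				Labels: pulumi.StringMap{
					"env": pulumi.String("dev"),
				},
				Config: &dataproc.ClusterConfigArgs{}, // defaults applied by Dataproc
			})
			if err != nil {
				return err
			}
			ctx.Export("clusterUuid", cluster.ClusterUuid)
			return nil
		})
	}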

type ClusterSelector

type ClusterSelector struct {
	// The cluster labels. Cluster must have all labels to match.
	ClusterLabels map[string]string `pulumi:"clusterLabels"`
	// Optional. The zone where workflow process executes. This parameter does not affect the selection of the cluster.If unspecified, the zone of the first cluster matching the selector is used.
	Zone *string `pulumi:"zone"`
}

A selector that chooses target cluster for jobs based on metadata.

type ClusterSelectorArgs

type ClusterSelectorArgs struct {
	// The cluster labels. Cluster must have all labels to match.
	ClusterLabels pulumi.StringMapInput `pulumi:"clusterLabels"`
	// Optional. The zone where workflow process executes. This parameter does not affect the selection of the cluster.If unspecified, the zone of the first cluster matching the selector is used.
	Zone pulumi.StringPtrInput `pulumi:"zone"`
}

A selector that chooses target cluster for jobs based on metadata.
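
A sketch of constructing the selector with the two fields above; referencing it from a workflow template's placement is assumed rather than documented on this page (fragment, same imports as the cluster example above):

	// Matches any cluster carrying both labels; Zone only affects where the
	// workflow process runs, not which cluster is selected.
	selector := &dataproc.ClusterSelectorArgs{
		ClusterLabels: pulumi.StringMap{
			"env":  pulumi.String("prod"),
			"team": pulumi.String("data"),
		},
		Zone: pulumi.String("us-central1-a"),
	}
	_ = selector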

func (ClusterSelectorArgs) ElementType

func (ClusterSelectorArgs) ElementType() reflect.Type

func (ClusterSelectorArgs) ToClusterSelectorOutput

func (i ClusterSelectorArgs) ToClusterSelectorOutput() ClusterSelectorOutput

func (ClusterSelectorArgs) ToClusterSelectorOutputWithContext

func (i ClusterSelectorArgs) ToClusterSelectorOutputWithContext(ctx context.Context) ClusterSelectorOutput

func (ClusterSelectorArgs) ToClusterSelectorPtrOutput

func (i ClusterSelectorArgs) ToClusterSelectorPtrOutput() ClusterSelectorPtrOutput

func (ClusterSelectorArgs) ToClusterSelectorPtrOutputWithContext

func (i ClusterSelectorArgs) ToClusterSelectorPtrOutputWithContext(ctx context.Context) ClusterSelectorPtrOutput

type ClusterSelectorInput

type ClusterSelectorInput interface {
	pulumi.Input

	ToClusterSelectorOutput() ClusterSelectorOutput
	ToClusterSelectorOutputWithContext(context.Context) ClusterSelectorOutput
}

ClusterSelectorInput is an input type that accepts ClusterSelectorArgs and ClusterSelectorOutput values. You can construct a concrete instance of `ClusterSelectorInput` via:

ClusterSelectorArgs{...}

type ClusterSelectorOutput

type ClusterSelectorOutput struct{ *pulumi.OutputState }

A selector that chooses target cluster for jobs based on metadata.

func (ClusterSelectorOutput) ClusterLabels

func (o ClusterSelectorOutput) ClusterLabels() pulumi.StringMapOutput

The cluster labels. Cluster must have all labels to match.

func (ClusterSelectorOutput) ElementType

func (ClusterSelectorOutput) ElementType() reflect.Type

func (ClusterSelectorOutput) ToClusterSelectorOutput

func (o ClusterSelectorOutput) ToClusterSelectorOutput() ClusterSelectorOutput

func (ClusterSelectorOutput) ToClusterSelectorOutputWithContext

func (o ClusterSelectorOutput) ToClusterSelectorOutputWithContext(ctx context.Context) ClusterSelectorOutput

func (ClusterSelectorOutput) ToClusterSelectorPtrOutput

func (o ClusterSelectorOutput) ToClusterSelectorPtrOutput() ClusterSelectorPtrOutput

func (ClusterSelectorOutput) ToClusterSelectorPtrOutputWithContext

func (o ClusterSelectorOutput) ToClusterSelectorPtrOutputWithContext(ctx context.Context) ClusterSelectorPtrOutput

func (ClusterSelectorOutput) Zone

Optional. The zone where workflow process executes. This parameter does not affect the selection of the cluster. If unspecified, the zone of the first cluster matching the selector is used.

type ClusterSelectorPtrInput

type ClusterSelectorPtrInput interface {
	pulumi.Input

	ToClusterSelectorPtrOutput() ClusterSelectorPtrOutput
	ToClusterSelectorPtrOutputWithContext(context.Context) ClusterSelectorPtrOutput
}

ClusterSelectorPtrInput is an input type that accepts ClusterSelectorArgs, ClusterSelectorPtr and ClusterSelectorPtrOutput values. You can construct a concrete instance of `ClusterSelectorPtrInput` via:

        ClusterSelectorArgs{...}

or:

        nil

type ClusterSelectorPtrOutput

type ClusterSelectorPtrOutput struct{ *pulumi.OutputState }

func (ClusterSelectorPtrOutput) ClusterLabels

The cluster labels. Cluster must have all labels to match.

func (ClusterSelectorPtrOutput) Elem

func (ClusterSelectorPtrOutput) ElementType

func (ClusterSelectorPtrOutput) ElementType() reflect.Type

func (ClusterSelectorPtrOutput) ToClusterSelectorPtrOutput

func (o ClusterSelectorPtrOutput) ToClusterSelectorPtrOutput() ClusterSelectorPtrOutput

func (ClusterSelectorPtrOutput) ToClusterSelectorPtrOutputWithContext

func (o ClusterSelectorPtrOutput) ToClusterSelectorPtrOutputWithContext(ctx context.Context) ClusterSelectorPtrOutput

func (ClusterSelectorPtrOutput) Zone

Optional. The zone where workflow process executes. This parameter does not affect the selection of the cluster. If unspecified, the zone of the first cluster matching the selector is used.

type ClusterSelectorResponse

type ClusterSelectorResponse struct {
	// The cluster labels. Cluster must have all labels to match.
	ClusterLabels map[string]string `pulumi:"clusterLabels"`
	// Optional. The zone where workflow process executes. This parameter does not affect the selection of the cluster.If unspecified, the zone of the first cluster matching the selector is used.
	Zone string `pulumi:"zone"`
}

A selector that chooses target cluster for jobs based on metadata.

type ClusterSelectorResponseOutput

type ClusterSelectorResponseOutput struct{ *pulumi.OutputState }

A selector that chooses target cluster for jobs based on metadata.

func (ClusterSelectorResponseOutput) ClusterLabels

The cluster labels. Cluster must have all labels to match.

func (ClusterSelectorResponseOutput) ElementType

func (ClusterSelectorResponseOutput) ToClusterSelectorResponseOutput

func (o ClusterSelectorResponseOutput) ToClusterSelectorResponseOutput() ClusterSelectorResponseOutput

func (ClusterSelectorResponseOutput) ToClusterSelectorResponseOutputWithContext

func (o ClusterSelectorResponseOutput) ToClusterSelectorResponseOutputWithContext(ctx context.Context) ClusterSelectorResponseOutput

func (ClusterSelectorResponseOutput) Zone

Optional. The zone where workflow process executes. This parameter does not affect the selection of the cluster. If unspecified, the zone of the first cluster matching the selector is used.

type ClusterState added in v0.3.0

type ClusterState struct {
}

func (ClusterState) ElementType added in v0.3.0

func (ClusterState) ElementType() reflect.Type

type ClusterStatusResponse

type ClusterStatusResponse struct {
	// Optional. Output only. Details of cluster's state.
	Detail string `pulumi:"detail"`
	// The cluster's state.
	State string `pulumi:"state"`
	// Time when this state was entered (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)).
	StateStartTime string `pulumi:"stateStartTime"`
	// Additional state information that includes status reported by the agent.
	Substate string `pulumi:"substate"`
}

The status of a cluster and its instances.

type ClusterStatusResponseArrayOutput

type ClusterStatusResponseArrayOutput struct{ *pulumi.OutputState }

func (ClusterStatusResponseArrayOutput) ElementType

func (ClusterStatusResponseArrayOutput) Index

func (ClusterStatusResponseArrayOutput) ToClusterStatusResponseArrayOutput

func (o ClusterStatusResponseArrayOutput) ToClusterStatusResponseArrayOutput() ClusterStatusResponseArrayOutput

func (ClusterStatusResponseArrayOutput) ToClusterStatusResponseArrayOutputWithContext

func (o ClusterStatusResponseArrayOutput) ToClusterStatusResponseArrayOutputWithContext(ctx context.Context) ClusterStatusResponseArrayOutput

type ClusterStatusResponseOutput

type ClusterStatusResponseOutput struct{ *pulumi.OutputState }

The status of a cluster and its instances.

func (ClusterStatusResponseOutput) Detail

Optional. Output only. Details of cluster's state.

func (ClusterStatusResponseOutput) ElementType

func (ClusterStatusResponseOutput) State

The cluster's state.

func (ClusterStatusResponseOutput) StateStartTime

func (o ClusterStatusResponseOutput) StateStartTime() pulumi.StringOutput

Time when this state was entered (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)).

func (ClusterStatusResponseOutput) Substate

Additional state information that includes status reported by the agent.

func (ClusterStatusResponseOutput) ToClusterStatusResponseOutput

func (o ClusterStatusResponseOutput) ToClusterStatusResponseOutput() ClusterStatusResponseOutput

func (ClusterStatusResponseOutput) ToClusterStatusResponseOutputWithContext

func (o ClusterStatusResponseOutput) ToClusterStatusResponseOutputWithContext(ctx context.Context) ClusterStatusResponseOutput

type ConfidentialInstanceConfig

type ConfidentialInstanceConfig struct {
	// Optional. Defines whether the instance should have confidential compute enabled.
	EnableConfidentialCompute *bool `pulumi:"enableConfidentialCompute"`
}

Confidential Instance Config for clusters using Confidential VMs (https://cloud.google.com/compute/confidential-vm/docs)

type ConfidentialInstanceConfigArgs

type ConfidentialInstanceConfigArgs struct {
	// Optional. Defines whether the instance should have confidential compute enabled.
	EnableConfidentialCompute pulumi.BoolPtrInput `pulumi:"enableConfidentialCompute"`
}

Confidential Instance Config for clusters using Confidential VMs (https://cloud.google.com/compute/confidential-vm/docs)
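
A sketch of enabling Confidential VMs; nesting this block under GceClusterConfigArgs follows the underlying Dataproc API and is an assumption not shown on this page (fragment, same imports as above):

	gce := &dataproc.GceClusterConfigArgs{
		// Confidential VMs also require compatible machine types and disk settings.
		ConfidentialInstanceConfig: &dataproc.ConfidentialInstanceConfigArgs{
			EnableConfidentialCompute: pulumi.Bool(true),
		},
	}
	_ = gce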

func (ConfidentialInstanceConfigArgs) ElementType

func (ConfidentialInstanceConfigArgs) ToConfidentialInstanceConfigOutput

func (i ConfidentialInstanceConfigArgs) ToConfidentialInstanceConfigOutput() ConfidentialInstanceConfigOutput

func (ConfidentialInstanceConfigArgs) ToConfidentialInstanceConfigOutputWithContext

func (i ConfidentialInstanceConfigArgs) ToConfidentialInstanceConfigOutputWithContext(ctx context.Context) ConfidentialInstanceConfigOutput

func (ConfidentialInstanceConfigArgs) ToConfidentialInstanceConfigPtrOutput

func (i ConfidentialInstanceConfigArgs) ToConfidentialInstanceConfigPtrOutput() ConfidentialInstanceConfigPtrOutput

func (ConfidentialInstanceConfigArgs) ToConfidentialInstanceConfigPtrOutputWithContext

func (i ConfidentialInstanceConfigArgs) ToConfidentialInstanceConfigPtrOutputWithContext(ctx context.Context) ConfidentialInstanceConfigPtrOutput

type ConfidentialInstanceConfigInput

type ConfidentialInstanceConfigInput interface {
	pulumi.Input

	ToConfidentialInstanceConfigOutput() ConfidentialInstanceConfigOutput
	ToConfidentialInstanceConfigOutputWithContext(context.Context) ConfidentialInstanceConfigOutput
}

ConfidentialInstanceConfigInput is an input type that accepts ConfidentialInstanceConfigArgs and ConfidentialInstanceConfigOutput values. You can construct a concrete instance of `ConfidentialInstanceConfigInput` via:

ConfidentialInstanceConfigArgs{...}

type ConfidentialInstanceConfigOutput

type ConfidentialInstanceConfigOutput struct{ *pulumi.OutputState }

Confidential Instance Config for clusters using Confidential VMs (https://cloud.google.com/compute/confidential-vm/docs)

func (ConfidentialInstanceConfigOutput) ElementType

func (ConfidentialInstanceConfigOutput) EnableConfidentialCompute

func (o ConfidentialInstanceConfigOutput) EnableConfidentialCompute() pulumi.BoolPtrOutput

Optional. Defines whether the instance should have confidential compute enabled.

func (ConfidentialInstanceConfigOutput) ToConfidentialInstanceConfigOutput

func (o ConfidentialInstanceConfigOutput) ToConfidentialInstanceConfigOutput() ConfidentialInstanceConfigOutput

func (ConfidentialInstanceConfigOutput) ToConfidentialInstanceConfigOutputWithContext

func (o ConfidentialInstanceConfigOutput) ToConfidentialInstanceConfigOutputWithContext(ctx context.Context) ConfidentialInstanceConfigOutput

func (ConfidentialInstanceConfigOutput) ToConfidentialInstanceConfigPtrOutput

func (o ConfidentialInstanceConfigOutput) ToConfidentialInstanceConfigPtrOutput() ConfidentialInstanceConfigPtrOutput

func (ConfidentialInstanceConfigOutput) ToConfidentialInstanceConfigPtrOutputWithContext

func (o ConfidentialInstanceConfigOutput) ToConfidentialInstanceConfigPtrOutputWithContext(ctx context.Context) ConfidentialInstanceConfigPtrOutput

type ConfidentialInstanceConfigPtrInput

type ConfidentialInstanceConfigPtrInput interface {
	pulumi.Input

	ToConfidentialInstanceConfigPtrOutput() ConfidentialInstanceConfigPtrOutput
	ToConfidentialInstanceConfigPtrOutputWithContext(context.Context) ConfidentialInstanceConfigPtrOutput
}

ConfidentialInstanceConfigPtrInput is an input type that accepts ConfidentialInstanceConfigArgs, ConfidentialInstanceConfigPtr and ConfidentialInstanceConfigPtrOutput values. You can construct a concrete instance of `ConfidentialInstanceConfigPtrInput` via:

        ConfidentialInstanceConfigArgs{...}

or:

        nil

type ConfidentialInstanceConfigPtrOutput

type ConfidentialInstanceConfigPtrOutput struct{ *pulumi.OutputState }

func (ConfidentialInstanceConfigPtrOutput) Elem

func (ConfidentialInstanceConfigPtrOutput) ElementType

func (ConfidentialInstanceConfigPtrOutput) EnableConfidentialCompute

func (o ConfidentialInstanceConfigPtrOutput) EnableConfidentialCompute() pulumi.BoolPtrOutput

Optional. Defines whether the instance should have confidential compute enabled.

func (ConfidentialInstanceConfigPtrOutput) ToConfidentialInstanceConfigPtrOutput

func (o ConfidentialInstanceConfigPtrOutput) ToConfidentialInstanceConfigPtrOutput() ConfidentialInstanceConfigPtrOutput

func (ConfidentialInstanceConfigPtrOutput) ToConfidentialInstanceConfigPtrOutputWithContext

func (o ConfidentialInstanceConfigPtrOutput) ToConfidentialInstanceConfigPtrOutputWithContext(ctx context.Context) ConfidentialInstanceConfigPtrOutput

type ConfidentialInstanceConfigResponse

type ConfidentialInstanceConfigResponse struct {
	// Optional. Defines whether the instance should have confidential compute enabled.
	EnableConfidentialCompute bool `pulumi:"enableConfidentialCompute"`
}

Confidential Instance Config for clusters using Confidential VMs (https://cloud.google.com/compute/confidential-vm/docs)

type ConfidentialInstanceConfigResponseOutput

type ConfidentialInstanceConfigResponseOutput struct{ *pulumi.OutputState }

Confidential Instance Config for clusters using Confidential VMs (https://cloud.google.com/compute/confidential-vm/docs)

func (ConfidentialInstanceConfigResponseOutput) ElementType

func (ConfidentialInstanceConfigResponseOutput) EnableConfidentialCompute

func (o ConfidentialInstanceConfigResponseOutput) EnableConfidentialCompute() pulumi.BoolOutput

Optional. Defines whether the instance should have confidential compute enabled.

func (ConfidentialInstanceConfigResponseOutput) ToConfidentialInstanceConfigResponseOutput

func (o ConfidentialInstanceConfigResponseOutput) ToConfidentialInstanceConfigResponseOutput() ConfidentialInstanceConfigResponseOutput

func (ConfidentialInstanceConfigResponseOutput) ToConfidentialInstanceConfigResponseOutputWithContext

func (o ConfidentialInstanceConfigResponseOutput) ToConfidentialInstanceConfigResponseOutputWithContext(ctx context.Context) ConfidentialInstanceConfigResponseOutput

type DataprocMetricConfig added in v0.15.0

type DataprocMetricConfig struct {
	// Metrics sources to enable.
	Metrics []Metric `pulumi:"metrics"`
}

Dataproc metric config.

type DataprocMetricConfigArgs added in v0.15.0

type DataprocMetricConfigArgs struct {
	// Metrics sources to enable.
	Metrics MetricArrayInput `pulumi:"metrics"`
}

Dataproc metric config.
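
A sketch of enabling a metrics source; the MetricArgs field names (MetricSource, MetricOverrides), the MetricMetricSource enum type, and the specific override metric name are assumptions not listed on this page (fragment, same imports as above):

	metricCfg := &dataproc.DataprocMetricConfigArgs{
		Metrics: dataproc.MetricArray{
			&dataproc.MetricArgs{
				// Enum value passed in its string form; the override name is illustrative.
				MetricSource: dataproc.MetricMetricSource("SPARK"),
				MetricOverrides: pulumi.StringArray{
					pulumi.String("spark:driver:BlockManager:memory.maxMem_MB"),
				},
			},
		},
	}
	_ = metricCfg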

func (DataprocMetricConfigArgs) ElementType added in v0.15.0

func (DataprocMetricConfigArgs) ElementType() reflect.Type

func (DataprocMetricConfigArgs) ToDataprocMetricConfigOutput added in v0.15.0

func (i DataprocMetricConfigArgs) ToDataprocMetricConfigOutput() DataprocMetricConfigOutput

func (DataprocMetricConfigArgs) ToDataprocMetricConfigOutputWithContext added in v0.15.0

func (i DataprocMetricConfigArgs) ToDataprocMetricConfigOutputWithContext(ctx context.Context) DataprocMetricConfigOutput

func (DataprocMetricConfigArgs) ToDataprocMetricConfigPtrOutput added in v0.15.0

func (i DataprocMetricConfigArgs) ToDataprocMetricConfigPtrOutput() DataprocMetricConfigPtrOutput

func (DataprocMetricConfigArgs) ToDataprocMetricConfigPtrOutputWithContext added in v0.15.0

func (i DataprocMetricConfigArgs) ToDataprocMetricConfigPtrOutputWithContext(ctx context.Context) DataprocMetricConfigPtrOutput

type DataprocMetricConfigInput added in v0.15.0

type DataprocMetricConfigInput interface {
	pulumi.Input

	ToDataprocMetricConfigOutput() DataprocMetricConfigOutput
	ToDataprocMetricConfigOutputWithContext(context.Context) DataprocMetricConfigOutput
}

DataprocMetricConfigInput is an input type that accepts DataprocMetricConfigArgs and DataprocMetricConfigOutput values. You can construct a concrete instance of `DataprocMetricConfigInput` via:

DataprocMetricConfigArgs{...}

type DataprocMetricConfigOutput added in v0.15.0

type DataprocMetricConfigOutput struct{ *pulumi.OutputState }

Dataproc metric config.

func (DataprocMetricConfigOutput) ElementType added in v0.15.0

func (DataprocMetricConfigOutput) ElementType() reflect.Type

func (DataprocMetricConfigOutput) Metrics added in v0.15.0

Metrics sources to enable.

func (DataprocMetricConfigOutput) ToDataprocMetricConfigOutput added in v0.15.0

func (o DataprocMetricConfigOutput) ToDataprocMetricConfigOutput() DataprocMetricConfigOutput

func (DataprocMetricConfigOutput) ToDataprocMetricConfigOutputWithContext added in v0.15.0

func (o DataprocMetricConfigOutput) ToDataprocMetricConfigOutputWithContext(ctx context.Context) DataprocMetricConfigOutput

func (DataprocMetricConfigOutput) ToDataprocMetricConfigPtrOutput added in v0.15.0

func (o DataprocMetricConfigOutput) ToDataprocMetricConfigPtrOutput() DataprocMetricConfigPtrOutput

func (DataprocMetricConfigOutput) ToDataprocMetricConfigPtrOutputWithContext added in v0.15.0

func (o DataprocMetricConfigOutput) ToDataprocMetricConfigPtrOutputWithContext(ctx context.Context) DataprocMetricConfigPtrOutput

type DataprocMetricConfigPtrInput added in v0.15.0

type DataprocMetricConfigPtrInput interface {
	pulumi.Input

	ToDataprocMetricConfigPtrOutput() DataprocMetricConfigPtrOutput
	ToDataprocMetricConfigPtrOutputWithContext(context.Context) DataprocMetricConfigPtrOutput
}

DataprocMetricConfigPtrInput is an input type that accepts DataprocMetricConfigArgs, DataprocMetricConfigPtr and DataprocMetricConfigPtrOutput values. You can construct a concrete instance of `DataprocMetricConfigPtrInput` via:

        DataprocMetricConfigArgs{...}

or:

        nil

func DataprocMetricConfigPtr added in v0.15.0

func DataprocMetricConfigPtr(v *DataprocMetricConfigArgs) DataprocMetricConfigPtrInput

type DataprocMetricConfigPtrOutput added in v0.15.0

type DataprocMetricConfigPtrOutput struct{ *pulumi.OutputState }

func (DataprocMetricConfigPtrOutput) Elem added in v0.15.0

func (DataprocMetricConfigPtrOutput) ElementType added in v0.15.0

func (DataprocMetricConfigPtrOutput) Metrics added in v0.15.0

Metrics sources to enable.

func (DataprocMetricConfigPtrOutput) ToDataprocMetricConfigPtrOutput added in v0.15.0

func (o DataprocMetricConfigPtrOutput) ToDataprocMetricConfigPtrOutput() DataprocMetricConfigPtrOutput

func (DataprocMetricConfigPtrOutput) ToDataprocMetricConfigPtrOutputWithContext added in v0.15.0

func (o DataprocMetricConfigPtrOutput) ToDataprocMetricConfigPtrOutputWithContext(ctx context.Context) DataprocMetricConfigPtrOutput

type DataprocMetricConfigResponse added in v0.15.0

type DataprocMetricConfigResponse struct {
	// Metrics sources to enable.
	Metrics []MetricResponse `pulumi:"metrics"`
}

Dataproc metric config.

type DataprocMetricConfigResponseOutput added in v0.15.0

type DataprocMetricConfigResponseOutput struct{ *pulumi.OutputState }

Dataproc metric config.

func (DataprocMetricConfigResponseOutput) ElementType added in v0.15.0

func (DataprocMetricConfigResponseOutput) Metrics added in v0.15.0

Metrics sources to enable.

func (DataprocMetricConfigResponseOutput) ToDataprocMetricConfigResponseOutput added in v0.15.0

func (o DataprocMetricConfigResponseOutput) ToDataprocMetricConfigResponseOutput() DataprocMetricConfigResponseOutput

func (DataprocMetricConfigResponseOutput) ToDataprocMetricConfigResponseOutputWithContext added in v0.15.0

func (o DataprocMetricConfigResponseOutput) ToDataprocMetricConfigResponseOutputWithContext(ctx context.Context) DataprocMetricConfigResponseOutput

type DiskConfig

type DiskConfig struct {
	// Optional. Size in GB of the boot disk (default is 500GB).
	BootDiskSizeGb *int `pulumi:"bootDiskSizeGb"`
	// Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive), "pd-ssd" (Persistent Disk Solid State Drive), or "pd-standard" (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types).
	BootDiskType *string `pulumi:"bootDiskType"`
	// Optional. Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance).
	LocalSsdInterface *string `pulumi:"localSsdInterface"`
	// Optional. Number of attached SSDs, from 0 to 8 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.Note: Local SSD options may vary by machine type and number of vCPUs selected.
	NumLocalSsds *int `pulumi:"numLocalSsds"`
}

Specifies the config of disk options for a group of VM instances.

type DiskConfigArgs

type DiskConfigArgs struct {
	// Optional. Size in GB of the boot disk (default is 500GB).
	BootDiskSizeGb pulumi.IntPtrInput `pulumi:"bootDiskSizeGb"`
	// Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive), "pd-ssd" (Persistent Disk Solid State Drive), or "pd-standard" (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types).
	BootDiskType pulumi.StringPtrInput `pulumi:"bootDiskType"`
	// Optional. Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance).
	LocalSsdInterface pulumi.StringPtrInput `pulumi:"localSsdInterface"`
	// Optional. Number of attached SSDs, from 0 to 8 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.Note: Local SSD options may vary by machine type and number of vCPUs selected.
	NumLocalSsds pulumi.IntPtrInput `pulumi:"numLocalSsds"`
}

Specifies the config of disk options for a group of VM instances.
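
A sketch of a disk configuration for an instance group, using the fields above with illustrative values (fragment, same imports as above):

	disk := &dataproc.DiskConfigArgs{
		BootDiskSizeGb:    pulumi.Int(100),         // default would be 500 GB
		BootDiskType:      pulumi.String("pd-ssd"), // "pd-standard", "pd-balanced", or "pd-ssd"
		NumLocalSsds:      pulumi.Int(2),           // 0-8; with 0, HDFS data stays on the boot disk
		LocalSsdInterface: pulumi.String("nvme"),   // "scsi" (default) or "nvme"
	}
	_ = disk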

func (DiskConfigArgs) ElementType

func (DiskConfigArgs) ElementType() reflect.Type

func (DiskConfigArgs) ToDiskConfigOutput

func (i DiskConfigArgs) ToDiskConfigOutput() DiskConfigOutput

func (DiskConfigArgs) ToDiskConfigOutputWithContext

func (i DiskConfigArgs) ToDiskConfigOutputWithContext(ctx context.Context) DiskConfigOutput

func (DiskConfigArgs) ToDiskConfigPtrOutput

func (i DiskConfigArgs) ToDiskConfigPtrOutput() DiskConfigPtrOutput

func (DiskConfigArgs) ToDiskConfigPtrOutputWithContext

func (i DiskConfigArgs) ToDiskConfigPtrOutputWithContext(ctx context.Context) DiskConfigPtrOutput

type DiskConfigInput

type DiskConfigInput interface {
	pulumi.Input

	ToDiskConfigOutput() DiskConfigOutput
	ToDiskConfigOutputWithContext(context.Context) DiskConfigOutput
}

DiskConfigInput is an input type that accepts DiskConfigArgs and DiskConfigOutput values. You can construct a concrete instance of `DiskConfigInput` via:

DiskConfigArgs{...}

type DiskConfigOutput

type DiskConfigOutput struct{ *pulumi.OutputState }

Specifies the config of disk options for a group of VM instances.

func (DiskConfigOutput) BootDiskSizeGb

func (o DiskConfigOutput) BootDiskSizeGb() pulumi.IntPtrOutput

Optional. Size in GB of the boot disk (default is 500GB).

func (DiskConfigOutput) BootDiskType

func (o DiskConfigOutput) BootDiskType() pulumi.StringPtrOutput

Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive), "pd-ssd" (Persistent Disk Solid State Drive), or "pd-standard" (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types).

func (DiskConfigOutput) ElementType

func (DiskConfigOutput) ElementType() reflect.Type

func (DiskConfigOutput) LocalSsdInterface added in v0.11.0

func (o DiskConfigOutput) LocalSsdInterface() pulumi.StringPtrOutput

Optional. Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance).

func (DiskConfigOutput) NumLocalSsds

func (o DiskConfigOutput) NumLocalSsds() pulumi.IntPtrOutput

Optional. Number of attached SSDs, from 0 to 8 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries. Note: Local SSD options may vary by machine type and number of vCPUs selected.

func (DiskConfigOutput) ToDiskConfigOutput

func (o DiskConfigOutput) ToDiskConfigOutput() DiskConfigOutput

func (DiskConfigOutput) ToDiskConfigOutputWithContext

func (o DiskConfigOutput) ToDiskConfigOutputWithContext(ctx context.Context) DiskConfigOutput

func (DiskConfigOutput) ToDiskConfigPtrOutput

func (o DiskConfigOutput) ToDiskConfigPtrOutput() DiskConfigPtrOutput

func (DiskConfigOutput) ToDiskConfigPtrOutputWithContext

func (o DiskConfigOutput) ToDiskConfigPtrOutputWithContext(ctx context.Context) DiskConfigPtrOutput

type DiskConfigPtrInput

type DiskConfigPtrInput interface {
	pulumi.Input

	ToDiskConfigPtrOutput() DiskConfigPtrOutput
	ToDiskConfigPtrOutputWithContext(context.Context) DiskConfigPtrOutput
}

DiskConfigPtrInput is an input type that accepts DiskConfigArgs, DiskConfigPtr and DiskConfigPtrOutput values. You can construct a concrete instance of `DiskConfigPtrInput` via:

        DiskConfigArgs{...}

or:

        nil

func DiskConfigPtr

func DiskConfigPtr(v *DiskConfigArgs) DiskConfigPtrInput

type DiskConfigPtrOutput

type DiskConfigPtrOutput struct{ *pulumi.OutputState }

func (DiskConfigPtrOutput) BootDiskSizeGb

func (o DiskConfigPtrOutput) BootDiskSizeGb() pulumi.IntPtrOutput

Optional. Size in GB of the boot disk (default is 500GB).

func (DiskConfigPtrOutput) BootDiskType

func (o DiskConfigPtrOutput) BootDiskType() pulumi.StringPtrOutput

Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive), "pd-ssd" (Persistent Disk Solid State Drive), or "pd-standard" (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types).

func (DiskConfigPtrOutput) Elem

func (DiskConfigPtrOutput) ElementType

func (DiskConfigPtrOutput) ElementType() reflect.Type

func (DiskConfigPtrOutput) LocalSsdInterface added in v0.11.0

func (o DiskConfigPtrOutput) LocalSsdInterface() pulumi.StringPtrOutput

Optional. Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance).

func (DiskConfigPtrOutput) NumLocalSsds

func (o DiskConfigPtrOutput) NumLocalSsds() pulumi.IntPtrOutput

Optional. Number of attached SSDs, from 0 to 8 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries. Note: Local SSD options may vary by machine type and number of vCPUs selected.

func (DiskConfigPtrOutput) ToDiskConfigPtrOutput

func (o DiskConfigPtrOutput) ToDiskConfigPtrOutput() DiskConfigPtrOutput

func (DiskConfigPtrOutput) ToDiskConfigPtrOutputWithContext

func (o DiskConfigPtrOutput) ToDiskConfigPtrOutputWithContext(ctx context.Context) DiskConfigPtrOutput

type DiskConfigResponse

type DiskConfigResponse struct {
	// Optional. Size in GB of the boot disk (default is 500GB).
	BootDiskSizeGb int `pulumi:"bootDiskSizeGb"`
	// Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive), "pd-ssd" (Persistent Disk Solid State Drive), or "pd-standard" (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types).
	BootDiskType string `pulumi:"bootDiskType"`
	// Optional. Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance).
	LocalSsdInterface string `pulumi:"localSsdInterface"`
	// Optional. Number of attached SSDs, from 0 to 8 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.Note: Local SSD options may vary by machine type and number of vCPUs selected.
	NumLocalSsds int `pulumi:"numLocalSsds"`
}

Specifies the config of disk options for a group of VM instances.

type DiskConfigResponseOutput

type DiskConfigResponseOutput struct{ *pulumi.OutputState }

Specifies the config of disk options for a group of VM instances.

func (DiskConfigResponseOutput) BootDiskSizeGb

func (o DiskConfigResponseOutput) BootDiskSizeGb() pulumi.IntOutput

Optional. Size in GB of the boot disk (default is 500GB).

func (DiskConfigResponseOutput) BootDiskType

func (o DiskConfigResponseOutput) BootDiskType() pulumi.StringOutput

Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive), "pd-ssd" (Persistent Disk Solid State Drive), or "pd-standard" (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types).

func (DiskConfigResponseOutput) ElementType

func (DiskConfigResponseOutput) ElementType() reflect.Type

func (DiskConfigResponseOutput) LocalSsdInterface added in v0.11.0

func (o DiskConfigResponseOutput) LocalSsdInterface() pulumi.StringOutput

Optional. Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance).

func (DiskConfigResponseOutput) NumLocalSsds

func (o DiskConfigResponseOutput) NumLocalSsds() pulumi.IntOutput

Optional. Number of attached SSDs, from 0 to 8 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries. Note: Local SSD options may vary by machine type and number of vCPUs selected.

func (DiskConfigResponseOutput) ToDiskConfigResponseOutput

func (o DiskConfigResponseOutput) ToDiskConfigResponseOutput() DiskConfigResponseOutput

func (DiskConfigResponseOutput) ToDiskConfigResponseOutputWithContext

func (o DiskConfigResponseOutput) ToDiskConfigResponseOutputWithContext(ctx context.Context) DiskConfigResponseOutput

type DriverSchedulingConfig added in v0.28.0

type DriverSchedulingConfig struct {
	// The amount of memory in MB the driver is requesting.
	MemoryMb int `pulumi:"memoryMb"`
	// The number of vCPUs the driver is requesting.
	Vcores int `pulumi:"vcores"`
}

Driver scheduling configuration.

type DriverSchedulingConfigArgs added in v0.28.0

type DriverSchedulingConfigArgs struct {
	// The amount of memory in MB the driver is requesting.
	MemoryMb pulumi.IntInput `pulumi:"memoryMb"`
	// The number of vCPUs the driver is requesting.
	Vcores pulumi.IntInput `pulumi:"vcores"`
}

Driver scheduling configuration.
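
A sketch of the driver scheduling settings using the two fields above; where the block is ultimately attached (a job's scheduling configuration) is an assumption (fragment, same imports as above):

	driverSched := &dataproc.DriverSchedulingConfigArgs{
		MemoryMb: pulumi.Int(2048), // driver memory request in MB
		Vcores:   pulumi.Int(2),    // driver vCPU request
	}
	_ = driverSched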

func (DriverSchedulingConfigArgs) ElementType added in v0.28.0

func (DriverSchedulingConfigArgs) ElementType() reflect.Type

func (DriverSchedulingConfigArgs) ToDriverSchedulingConfigOutput added in v0.28.0

func (i DriverSchedulingConfigArgs) ToDriverSchedulingConfigOutput() DriverSchedulingConfigOutput

func (DriverSchedulingConfigArgs) ToDriverSchedulingConfigOutputWithContext added in v0.28.0

func (i DriverSchedulingConfigArgs) ToDriverSchedulingConfigOutputWithContext(ctx context.Context) DriverSchedulingConfigOutput

func (DriverSchedulingConfigArgs) ToDriverSchedulingConfigPtrOutput added in v0.28.0

func (i DriverSchedulingConfigArgs) ToDriverSchedulingConfigPtrOutput() DriverSchedulingConfigPtrOutput

func (DriverSchedulingConfigArgs) ToDriverSchedulingConfigPtrOutputWithContext added in v0.28.0

func (i DriverSchedulingConfigArgs) ToDriverSchedulingConfigPtrOutputWithContext(ctx context.Context) DriverSchedulingConfigPtrOutput

type DriverSchedulingConfigInput added in v0.28.0

type DriverSchedulingConfigInput interface {
	pulumi.Input

	ToDriverSchedulingConfigOutput() DriverSchedulingConfigOutput
	ToDriverSchedulingConfigOutputWithContext(context.Context) DriverSchedulingConfigOutput
}

DriverSchedulingConfigInput is an input type that accepts DriverSchedulingConfigArgs and DriverSchedulingConfigOutput values. You can construct a concrete instance of `DriverSchedulingConfigInput` via:

DriverSchedulingConfigArgs{...}

type DriverSchedulingConfigOutput added in v0.28.0

type DriverSchedulingConfigOutput struct{ *pulumi.OutputState }

Driver scheduling configuration.

func (DriverSchedulingConfigOutput) ElementType added in v0.28.0

func (DriverSchedulingConfigOutput) MemoryMb added in v0.28.0

The amount of memory in MB the driver is requesting.

func (DriverSchedulingConfigOutput) ToDriverSchedulingConfigOutput added in v0.28.0

func (o DriverSchedulingConfigOutput) ToDriverSchedulingConfigOutput() DriverSchedulingConfigOutput

func (DriverSchedulingConfigOutput) ToDriverSchedulingConfigOutputWithContext added in v0.28.0

func (o DriverSchedulingConfigOutput) ToDriverSchedulingConfigOutputWithContext(ctx context.Context) DriverSchedulingConfigOutput

func (DriverSchedulingConfigOutput) ToDriverSchedulingConfigPtrOutput added in v0.28.0

func (o DriverSchedulingConfigOutput) ToDriverSchedulingConfigPtrOutput() DriverSchedulingConfigPtrOutput

func (DriverSchedulingConfigOutput) ToDriverSchedulingConfigPtrOutputWithContext added in v0.28.0

func (o DriverSchedulingConfigOutput) ToDriverSchedulingConfigPtrOutputWithContext(ctx context.Context) DriverSchedulingConfigPtrOutput

func (DriverSchedulingConfigOutput) Vcores added in v0.28.0

The number of vCPUs the driver is requesting.

type DriverSchedulingConfigPtrInput added in v0.28.0

type DriverSchedulingConfigPtrInput interface {
	pulumi.Input

	ToDriverSchedulingConfigPtrOutput() DriverSchedulingConfigPtrOutput
	ToDriverSchedulingConfigPtrOutputWithContext(context.Context) DriverSchedulingConfigPtrOutput
}

DriverSchedulingConfigPtrInput is an input type that accepts DriverSchedulingConfigArgs, DriverSchedulingConfigPtr and DriverSchedulingConfigPtrOutput values. You can construct a concrete instance of `DriverSchedulingConfigPtrInput` via:

        DriverSchedulingConfigArgs{...}

or:

        nil

func DriverSchedulingConfigPtr added in v0.28.0

func DriverSchedulingConfigPtr(v *DriverSchedulingConfigArgs) DriverSchedulingConfigPtrInput

type DriverSchedulingConfigPtrOutput added in v0.28.0

type DriverSchedulingConfigPtrOutput struct{ *pulumi.OutputState }

func (DriverSchedulingConfigPtrOutput) Elem added in v0.28.0

func (DriverSchedulingConfigPtrOutput) ElementType added in v0.28.0

func (DriverSchedulingConfigPtrOutput) MemoryMb added in v0.28.0

The amount of memory in MB the driver is requesting.

func (DriverSchedulingConfigPtrOutput) ToDriverSchedulingConfigPtrOutput added in v0.28.0

func (o DriverSchedulingConfigPtrOutput) ToDriverSchedulingConfigPtrOutput() DriverSchedulingConfigPtrOutput

func (DriverSchedulingConfigPtrOutput) ToDriverSchedulingConfigPtrOutputWithContext added in v0.28.0

func (o DriverSchedulingConfigPtrOutput) ToDriverSchedulingConfigPtrOutputWithContext(ctx context.Context) DriverSchedulingConfigPtrOutput

func (DriverSchedulingConfigPtrOutput) Vcores added in v0.28.0

The number of vCPUs the driver is requesting.

type DriverSchedulingConfigResponse added in v0.28.0

type DriverSchedulingConfigResponse struct {
	// The amount of memory in MB the driver is requesting.
	MemoryMb int `pulumi:"memoryMb"`
	// The number of vCPUs the driver is requesting.
	Vcores int `pulumi:"vcores"`
}

Driver scheduling configuration.

type DriverSchedulingConfigResponseOutput added in v0.28.0

type DriverSchedulingConfigResponseOutput struct{ *pulumi.OutputState }

Driver scheduling configuration.

func (DriverSchedulingConfigResponseOutput) ElementType added in v0.28.0

func (DriverSchedulingConfigResponseOutput) MemoryMb added in v0.28.0

The amount of memory in MB the driver is requesting.

func (DriverSchedulingConfigResponseOutput) ToDriverSchedulingConfigResponseOutput added in v0.28.0

func (o DriverSchedulingConfigResponseOutput) ToDriverSchedulingConfigResponseOutput() DriverSchedulingConfigResponseOutput

func (DriverSchedulingConfigResponseOutput) ToDriverSchedulingConfigResponseOutputWithContext added in v0.28.0

func (o DriverSchedulingConfigResponseOutput) ToDriverSchedulingConfigResponseOutputWithContext(ctx context.Context) DriverSchedulingConfigResponseOutput

func (DriverSchedulingConfigResponseOutput) Vcores added in v0.28.0

The number of vCPUs the driver is requesting.

type EncryptionConfig

type EncryptionConfig struct {
	// Optional. The Cloud KMS key name to use for PD disk encryption for all instances in the cluster.
	GcePdKmsKeyName *string `pulumi:"gcePdKmsKeyName"`
	// Optional. The Cloud KMS key name to use for encrypting customer core content in spanner and cluster PD disk for all instances in the cluster.
	KmsKey *string `pulumi:"kmsKey"`
}

Encryption settings for the cluster.

type EncryptionConfigArgs

type EncryptionConfigArgs struct {
	// Optional. The Cloud KMS key name to use for PD disk encryption for all instances in the cluster.
	GcePdKmsKeyName pulumi.StringPtrInput `pulumi:"gcePdKmsKeyName"`
	// Optional. The Cloud KMS key name to use for encrypting customer core content in spanner and cluster PD disk for all instances in the cluster.
	KmsKey pulumi.StringPtrInput `pulumi:"kmsKey"`
}

Encryption settings for the cluster.
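
A sketch of customer-managed encryption settings; the key name is a placeholder in the usual Cloud KMS resource format (fragment, same imports as above):

	enc := &dataproc.EncryptionConfigArgs{
		// Encrypts the persistent disks of all cluster instances with a customer-managed key.
		GcePdKmsKeyName: pulumi.String("projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key"),
	}
	_ = enc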

func (EncryptionConfigArgs) ElementType

func (EncryptionConfigArgs) ElementType() reflect.Type

func (EncryptionConfigArgs) ToEncryptionConfigOutput

func (i EncryptionConfigArgs) ToEncryptionConfigOutput() EncryptionConfigOutput

func (EncryptionConfigArgs) ToEncryptionConfigOutputWithContext

func (i EncryptionConfigArgs) ToEncryptionConfigOutputWithContext(ctx context.Context) EncryptionConfigOutput

func (EncryptionConfigArgs) ToEncryptionConfigPtrOutput

func (i EncryptionConfigArgs) ToEncryptionConfigPtrOutput() EncryptionConfigPtrOutput

func (EncryptionConfigArgs) ToEncryptionConfigPtrOutputWithContext

func (i EncryptionConfigArgs) ToEncryptionConfigPtrOutputWithContext(ctx context.Context) EncryptionConfigPtrOutput

type EncryptionConfigInput

type EncryptionConfigInput interface {
	pulumi.Input

	ToEncryptionConfigOutput() EncryptionConfigOutput
	ToEncryptionConfigOutputWithContext(context.Context) EncryptionConfigOutput
}

EncryptionConfigInput is an input type that accepts EncryptionConfigArgs and EncryptionConfigOutput values. You can construct a concrete instance of `EncryptionConfigInput` via:

EncryptionConfigArgs{...}

type EncryptionConfigOutput

type EncryptionConfigOutput struct{ *pulumi.OutputState }

Encryption settings for the cluster.

func (EncryptionConfigOutput) ElementType

func (EncryptionConfigOutput) ElementType() reflect.Type

func (EncryptionConfigOutput) GcePdKmsKeyName

func (o EncryptionConfigOutput) GcePdKmsKeyName() pulumi.StringPtrOutput

Optional. The Cloud KMS key name to use for PD disk encryption for all instances in the cluster.

func (EncryptionConfigOutput) KmsKey added in v0.29.0

Optional. The Cloud KMS key name to use for encrypting customer core content in spanner and cluster PD disk for all instances in the cluster.

func (EncryptionConfigOutput) ToEncryptionConfigOutput

func (o EncryptionConfigOutput) ToEncryptionConfigOutput() EncryptionConfigOutput

func (EncryptionConfigOutput) ToEncryptionConfigOutputWithContext

func (o EncryptionConfigOutput) ToEncryptionConfigOutputWithContext(ctx context.Context) EncryptionConfigOutput

func (EncryptionConfigOutput) ToEncryptionConfigPtrOutput

func (o EncryptionConfigOutput) ToEncryptionConfigPtrOutput() EncryptionConfigPtrOutput

func (EncryptionConfigOutput) ToEncryptionConfigPtrOutputWithContext

func (o EncryptionConfigOutput) ToEncryptionConfigPtrOutputWithContext(ctx context.Context) EncryptionConfigPtrOutput

type EncryptionConfigPtrInput

type EncryptionConfigPtrInput interface {
	pulumi.Input

	ToEncryptionConfigPtrOutput() EncryptionConfigPtrOutput
	ToEncryptionConfigPtrOutputWithContext(context.Context) EncryptionConfigPtrOutput
}

EncryptionConfigPtrInput is an input type that accepts EncryptionConfigArgs, EncryptionConfigPtr and EncryptionConfigPtrOutput values. You can construct a concrete instance of `EncryptionConfigPtrInput` via:

        EncryptionConfigArgs{...}

or:

        nil

type EncryptionConfigPtrOutput

type EncryptionConfigPtrOutput struct{ *pulumi.OutputState }

func (EncryptionConfigPtrOutput) Elem

func (EncryptionConfigPtrOutput) ElementType

func (EncryptionConfigPtrOutput) ElementType() reflect.Type

func (EncryptionConfigPtrOutput) GcePdKmsKeyName

func (o EncryptionConfigPtrOutput) GcePdKmsKeyName() pulumi.StringPtrOutput

Optional. The Cloud KMS key name to use for PD disk encryption for all instances in the cluster.

func (EncryptionConfigPtrOutput) KmsKey added in v0.29.0

Optional. The Cloud KMS key name to use for encrypting customer core content in spanner and cluster PD disk for all instances in the cluster.

func (EncryptionConfigPtrOutput) ToEncryptionConfigPtrOutput

func (o EncryptionConfigPtrOutput) ToEncryptionConfigPtrOutput() EncryptionConfigPtrOutput

func (EncryptionConfigPtrOutput) ToEncryptionConfigPtrOutputWithContext

func (o EncryptionConfigPtrOutput) ToEncryptionConfigPtrOutputWithContext(ctx context.Context) EncryptionConfigPtrOutput

type EncryptionConfigResponse

type EncryptionConfigResponse struct {
	// Optional. The Cloud KMS key name to use for PD disk encryption for all instances in the cluster.
	GcePdKmsKeyName string `pulumi:"gcePdKmsKeyName"`
	// Optional. The Cloud KMS key name to use for encrypting customer core content in spanner and cluster PD disk for all instances in the cluster.
	KmsKey string `pulumi:"kmsKey"`
}

Encryption settings for the cluster.

type EncryptionConfigResponseOutput

type EncryptionConfigResponseOutput struct{ *pulumi.OutputState }

Encryption settings for the cluster.

func (EncryptionConfigResponseOutput) ElementType

func (EncryptionConfigResponseOutput) GcePdKmsKeyName

Optional. The Cloud KMS key name to use for PD disk encryption for all instances in the cluster.

func (EncryptionConfigResponseOutput) KmsKey added in v0.29.0

Optional. The Cloud KMS key name to use for encrypting customer core content in spanner and cluster PD disk for all instances in the cluster.

func (EncryptionConfigResponseOutput) ToEncryptionConfigResponseOutput

func (o EncryptionConfigResponseOutput) ToEncryptionConfigResponseOutput() EncryptionConfigResponseOutput

func (EncryptionConfigResponseOutput) ToEncryptionConfigResponseOutputWithContext

func (o EncryptionConfigResponseOutput) ToEncryptionConfigResponseOutputWithContext(ctx context.Context) EncryptionConfigResponseOutput

type EndpointConfig

type EndpointConfig struct {
	// Optional. If true, enable http access to specific ports on the cluster from external sources. Defaults to false.
	EnableHttpPortAccess *bool `pulumi:"enableHttpPortAccess"`
}

Endpoint config for this cluster

type EndpointConfigArgs

type EndpointConfigArgs struct {
	// Optional. If true, enable http access to specific ports on the cluster from external sources. Defaults to false.
	EnableHttpPortAccess pulumi.BoolPtrInput `pulumi:"enableHttpPortAccess"`
}

Endpoint config for this cluster
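
A sketch of enabling the HTTP endpoints and, after creation, reading back the resulting port map; the Config and EndpointConfig accessors on the response side are assumptions consistent with the response types on this page (fragment, same imports as above):

	// In ClusterConfigArgs:
	endpoint := &dataproc.EndpointConfigArgs{
		EnableHttpPortAccess: pulumi.Bool(true),
	}
	_ = endpoint

	// After creation, HttpPorts is only populated when EnableHttpPortAccess is true:
	// ctx.Export("httpPorts", cluster.Config.EndpointConfig().HttpPorts())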

func (EndpointConfigArgs) ElementType

func (EndpointConfigArgs) ElementType() reflect.Type

func (EndpointConfigArgs) ToEndpointConfigOutput

func (i EndpointConfigArgs) ToEndpointConfigOutput() EndpointConfigOutput

func (EndpointConfigArgs) ToEndpointConfigOutputWithContext

func (i EndpointConfigArgs) ToEndpointConfigOutputWithContext(ctx context.Context) EndpointConfigOutput

func (EndpointConfigArgs) ToEndpointConfigPtrOutput

func (i EndpointConfigArgs) ToEndpointConfigPtrOutput() EndpointConfigPtrOutput

func (EndpointConfigArgs) ToEndpointConfigPtrOutputWithContext

func (i EndpointConfigArgs) ToEndpointConfigPtrOutputWithContext(ctx context.Context) EndpointConfigPtrOutput

type EndpointConfigInput

type EndpointConfigInput interface {
	pulumi.Input

	ToEndpointConfigOutput() EndpointConfigOutput
	ToEndpointConfigOutputWithContext(context.Context) EndpointConfigOutput
}

EndpointConfigInput is an input type that accepts EndpointConfigArgs and EndpointConfigOutput values. You can construct a concrete instance of `EndpointConfigInput` via:

EndpointConfigArgs{...}

type EndpointConfigOutput

type EndpointConfigOutput struct{ *pulumi.OutputState }

Endpoint config for this cluster

func (EndpointConfigOutput) ElementType

func (EndpointConfigOutput) ElementType() reflect.Type

func (EndpointConfigOutput) EnableHttpPortAccess

func (o EndpointConfigOutput) EnableHttpPortAccess() pulumi.BoolPtrOutput

Optional. If true, enable http access to specific ports on the cluster from external sources. Defaults to false.

func (EndpointConfigOutput) ToEndpointConfigOutput

func (o EndpointConfigOutput) ToEndpointConfigOutput() EndpointConfigOutput

func (EndpointConfigOutput) ToEndpointConfigOutputWithContext

func (o EndpointConfigOutput) ToEndpointConfigOutputWithContext(ctx context.Context) EndpointConfigOutput

func (EndpointConfigOutput) ToEndpointConfigPtrOutput

func (o EndpointConfigOutput) ToEndpointConfigPtrOutput() EndpointConfigPtrOutput

func (EndpointConfigOutput) ToEndpointConfigPtrOutputWithContext

func (o EndpointConfigOutput) ToEndpointConfigPtrOutputWithContext(ctx context.Context) EndpointConfigPtrOutput

type EndpointConfigPtrInput

type EndpointConfigPtrInput interface {
	pulumi.Input

	ToEndpointConfigPtrOutput() EndpointConfigPtrOutput
	ToEndpointConfigPtrOutputWithContext(context.Context) EndpointConfigPtrOutput
}

EndpointConfigPtrInput is an input type that accepts EndpointConfigArgs, EndpointConfigPtr and EndpointConfigPtrOutput values. You can construct a concrete instance of `EndpointConfigPtrInput` via:

        EndpointConfigArgs{...}

or:

        nil

type EndpointConfigPtrOutput

type EndpointConfigPtrOutput struct{ *pulumi.OutputState }

func (EndpointConfigPtrOutput) Elem

func (EndpointConfigPtrOutput) ElementType

func (EndpointConfigPtrOutput) ElementType() reflect.Type

func (EndpointConfigPtrOutput) EnableHttpPortAccess

func (o EndpointConfigPtrOutput) EnableHttpPortAccess() pulumi.BoolPtrOutput

Optional. If true, enable http access to specific ports on the cluster from external sources. Defaults to false.

func (EndpointConfigPtrOutput) ToEndpointConfigPtrOutput

func (o EndpointConfigPtrOutput) ToEndpointConfigPtrOutput() EndpointConfigPtrOutput

func (EndpointConfigPtrOutput) ToEndpointConfigPtrOutputWithContext

func (o EndpointConfigPtrOutput) ToEndpointConfigPtrOutputWithContext(ctx context.Context) EndpointConfigPtrOutput

type EndpointConfigResponse

type EndpointConfigResponse struct {
	// Optional. If true, enable http access to specific ports on the cluster from external sources. Defaults to false.
	EnableHttpPortAccess bool `pulumi:"enableHttpPortAccess"`
	// The map of port descriptions to URLs. Will only be populated if enable_http_port_access is true.
	HttpPorts map[string]string `pulumi:"httpPorts"`
}

Endpoint config for this cluster

type EndpointConfigResponseOutput

type EndpointConfigResponseOutput struct{ *pulumi.OutputState }

Endpoint config for this cluster

func (EndpointConfigResponseOutput) ElementType

func (EndpointConfigResponseOutput) EnableHttpPortAccess

func (o EndpointConfigResponseOutput) EnableHttpPortAccess() pulumi.BoolOutput

Optional. If true, enable http access to specific ports on the cluster from external sources. Defaults to false.

func (EndpointConfigResponseOutput) HttpPorts

The map of port descriptions to URLs. Will only be populated if enable_http_port_access is true.

func (EndpointConfigResponseOutput) ToEndpointConfigResponseOutput

func (o EndpointConfigResponseOutput) ToEndpointConfigResponseOutput() EndpointConfigResponseOutput

func (EndpointConfigResponseOutput) ToEndpointConfigResponseOutputWithContext

func (o EndpointConfigResponseOutput) ToEndpointConfigResponseOutputWithContext(ctx context.Context) EndpointConfigResponseOutput

type EnvironmentConfig added in v0.12.0

type EnvironmentConfig struct {
	// Optional. Execution configuration for a workload.
	ExecutionConfig *ExecutionConfig `pulumi:"executionConfig"`
	// Optional. Peripherals configuration that workload has access to.
	PeripheralsConfig *PeripheralsConfig `pulumi:"peripheralsConfig"`
}

Environment configuration for a workload.

type EnvironmentConfigArgs added in v0.12.0

type EnvironmentConfigArgs struct {
	// Optional. Execution configuration for a workload.
	ExecutionConfig ExecutionConfigPtrInput `pulumi:"executionConfig"`
	// Optional. Peripherals configuration that workload has access to.
	PeripheralsConfig PeripheralsConfigPtrInput `pulumi:"peripheralsConfig"`
}

Environment configuration for a workload.
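
A sketch of an environment configuration; EnvironmentConfig applies to serverless Batch/session workloads rather than clusters, and the nested ExecutionConfigArgs/PeripheralsConfigArgs field names and values shown are assumptions not documented on this page (fragment, same imports as above):

	env := &dataproc.EnvironmentConfigArgs{
		ExecutionConfig: &dataproc.ExecutionConfigArgs{
			ServiceAccount: pulumi.String("workload-sa@my-project.iam.gserviceaccount.com"), // placeholder
			Ttl:            pulumi.String("3600s"),
		},
		PeripheralsConfig: &dataproc.PeripheralsConfigArgs{
			MetastoreService: pulumi.String("projects/my-project/locations/us-central1/services/my-metastore"), // placeholder
		},
	}
	_ = env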

func (EnvironmentConfigArgs) ElementType added in v0.12.0

func (EnvironmentConfigArgs) ElementType() reflect.Type

func (EnvironmentConfigArgs) ToEnvironmentConfigOutput added in v0.12.0

func (i EnvironmentConfigArgs) ToEnvironmentConfigOutput() EnvironmentConfigOutput

func (EnvironmentConfigArgs) ToEnvironmentConfigOutputWithContext added in v0.12.0

func (i EnvironmentConfigArgs) ToEnvironmentConfigOutputWithContext(ctx context.Context) EnvironmentConfigOutput

func (EnvironmentConfigArgs) ToEnvironmentConfigPtrOutput added in v0.12.0

func (i EnvironmentConfigArgs) ToEnvironmentConfigPtrOutput() EnvironmentConfigPtrOutput

func (EnvironmentConfigArgs) ToEnvironmentConfigPtrOutputWithContext added in v0.12.0

func (i EnvironmentConfigArgs) ToEnvironmentConfigPtrOutputWithContext(ctx context.Context) EnvironmentConfigPtrOutput

type EnvironmentConfigInput added in v0.12.0

type EnvironmentConfigInput interface {
	pulumi.Input

	ToEnvironmentConfigOutput() EnvironmentConfigOutput
	ToEnvironmentConfigOutputWithContext(context.Context) EnvironmentConfigOutput
}

EnvironmentConfigInput is an input type that accepts EnvironmentConfigArgs and EnvironmentConfigOutput values. You can construct a concrete instance of `EnvironmentConfigInput` via:

EnvironmentConfigArgs{...}

type EnvironmentConfigOutput added in v0.12.0

type EnvironmentConfigOutput struct{ *pulumi.OutputState }

Environment configuration for a workload.

func (EnvironmentConfigOutput) ElementType added in v0.12.0

func (EnvironmentConfigOutput) ElementType() reflect.Type

func (EnvironmentConfigOutput) ExecutionConfig added in v0.12.0

Optional. Execution configuration for a workload.

func (EnvironmentConfigOutput) PeripheralsConfig added in v0.12.0

Optional. Peripherals configuration that workload has access to.

func (EnvironmentConfigOutput) ToEnvironmentConfigOutput added in v0.12.0

func (o EnvironmentConfigOutput) ToEnvironmentConfigOutput() EnvironmentConfigOutput

func (EnvironmentConfigOutput) ToEnvironmentConfigOutputWithContext added in v0.12.0

func (o EnvironmentConfigOutput) ToEnvironmentConfigOutputWithContext(ctx context.Context) EnvironmentConfigOutput

func (EnvironmentConfigOutput) ToEnvironmentConfigPtrOutput added in v0.12.0

func (o EnvironmentConfigOutput) ToEnvironmentConfigPtrOutput() EnvironmentConfigPtrOutput

func (EnvironmentConfigOutput) ToEnvironmentConfigPtrOutputWithContext added in v0.12.0

func (o EnvironmentConfigOutput) ToEnvironmentConfigPtrOutputWithContext(ctx context.Context) EnvironmentConfigPtrOutput

type EnvironmentConfigPtrInput added in v0.12.0

type EnvironmentConfigPtrInput interface {
	pulumi.Input

	ToEnvironmentConfigPtrOutput() EnvironmentConfigPtrOutput
	ToEnvironmentConfigPtrOutputWithContext(context.Context) EnvironmentConfigPtrOutput
}

EnvironmentConfigPtrInput is an input type that accepts EnvironmentConfigArgs, EnvironmentConfigPtr and EnvironmentConfigPtrOutput values. You can construct a concrete instance of `EnvironmentConfigPtrInput` via:

        EnvironmentConfigArgs{...}

or:

        nil

func EnvironmentConfigPtr added in v0.12.0

func EnvironmentConfigPtr(v *EnvironmentConfigArgs) EnvironmentConfigPtrInput
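
A brief sketch of the two accepted ways to supply this optional config: wrap a *EnvironmentConfigArgs with the Ptr helper, or return nil to leave the field unset. Import paths and field values are assumptions for illustration:

package main

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// withOptionalEnvironment returns either a wrapped config or nil, both of
// which are valid EnvironmentConfigPtrInput values.
func withOptionalEnvironment(enable bool) dataproc.EnvironmentConfigPtrInput {
	if !enable {
		return nil
	}
	return dataproc.EnvironmentConfigPtr(&dataproc.EnvironmentConfigArgs{
		ExecutionConfig: dataproc.ExecutionConfigArgs{
			StagingBucket: pulumi.String("my-staging-bucket"),
		},
	})
}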

type EnvironmentConfigPtrOutput added in v0.12.0

type EnvironmentConfigPtrOutput struct{ *pulumi.OutputState }

func (EnvironmentConfigPtrOutput) Elem added in v0.12.0

func (EnvironmentConfigPtrOutput) ElementType added in v0.12.0

func (EnvironmentConfigPtrOutput) ElementType() reflect.Type

func (EnvironmentConfigPtrOutput) ExecutionConfig added in v0.12.0

Optional. Execution configuration for a workload.

func (EnvironmentConfigPtrOutput) PeripheralsConfig added in v0.12.0

Optional. Peripherals configuration that workload has access to.

func (EnvironmentConfigPtrOutput) ToEnvironmentConfigPtrOutput added in v0.12.0

func (o EnvironmentConfigPtrOutput) ToEnvironmentConfigPtrOutput() EnvironmentConfigPtrOutput

func (EnvironmentConfigPtrOutput) ToEnvironmentConfigPtrOutputWithContext added in v0.12.0

func (o EnvironmentConfigPtrOutput) ToEnvironmentConfigPtrOutputWithContext(ctx context.Context) EnvironmentConfigPtrOutput

type EnvironmentConfigResponse added in v0.12.0

type EnvironmentConfigResponse struct {
	// Optional. Execution configuration for a workload.
	ExecutionConfig ExecutionConfigResponse `pulumi:"executionConfig"`
	// Optional. Peripherals configuration that workload has access to.
	PeripheralsConfig PeripheralsConfigResponse `pulumi:"peripheralsConfig"`
}

Environment configuration for a workload.

type EnvironmentConfigResponseOutput added in v0.12.0

type EnvironmentConfigResponseOutput struct{ *pulumi.OutputState }

Environment configuration for a workload.

func (EnvironmentConfigResponseOutput) ElementType added in v0.12.0

func (EnvironmentConfigResponseOutput) ExecutionConfig added in v0.12.0

Optional. Execution configuration for a workload.

func (EnvironmentConfigResponseOutput) PeripheralsConfig added in v0.12.0

Optional. Peripherals configuration that workload has access to.

func (EnvironmentConfigResponseOutput) ToEnvironmentConfigResponseOutput added in v0.12.0

func (o EnvironmentConfigResponseOutput) ToEnvironmentConfigResponseOutput() EnvironmentConfigResponseOutput

func (EnvironmentConfigResponseOutput) ToEnvironmentConfigResponseOutputWithContext added in v0.12.0

func (o EnvironmentConfigResponseOutput) ToEnvironmentConfigResponseOutputWithContext(ctx context.Context) EnvironmentConfigResponseOutput

type ExecutionConfig added in v0.12.0

type ExecutionConfig struct {
	// Optional. Applies to sessions only. The duration to keep the session alive while it's idling. Exceeding this threshold causes the session to terminate. This field cannot be set on a batch workload. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). Defaults to 1 hour if not set. If both ttl and idle_ttl are specified for an interactive session, the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idle_ttl or when ttl has been exceeded, whichever occurs first.
	IdleTtl *string `pulumi:"idleTtl"`
	// Optional. The Cloud KMS key to use for encryption.
	KmsKey *string `pulumi:"kmsKey"`
	// Optional. Tags used for network traffic control.
	NetworkTags []string `pulumi:"networkTags"`
	// Optional. Network URI to connect workload to.
	NetworkUri *string `pulumi:"networkUri"`
	// Optional. Service account used to execute the workload.
	ServiceAccount *string `pulumi:"serviceAccount"`
	// Optional. A Cloud Storage bucket used to stage workload dependencies, config files, and store workload output and other ephemeral data, such as Spark history files. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location according to the region where your workload is running, and then create and manage project-level, per-location staging and temporary buckets. This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.
	StagingBucket *string `pulumi:"stagingBucket"`
	// Optional. Subnetwork URI to connect workload to.
	SubnetworkUri *string `pulumi:"subnetworkUri"`
	// Optional. The duration after which the workload will be terminated, specified as the JSON representation for Duration (https://protobuf.dev/programming-guides/proto3/#json). When the workload exceeds this duration, it will be unconditionally terminated without waiting for ongoing work to finish. If ttl is not specified for a batch workload, the workload will be allowed to run until it exits naturally (or run forever without exiting). If ttl is not specified for an interactive session, it defaults to 24 hours. If ttl is not specified for a batch that uses 2.1+ runtime version, it defaults to 4 hours. Minimum value is 10 minutes; maximum value is 14 days. If both ttl and idle_ttl are specified (for an interactive session), the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idle_ttl or when ttl has been exceeded, whichever occurs first.
	Ttl *string `pulumi:"ttl"`
}

Execution configuration for a workload.

type ExecutionConfigArgs added in v0.12.0

type ExecutionConfigArgs struct {
	// Optional. Applies to sessions only. The duration to keep the session alive while it's idling. Exceeding this threshold causes the session to terminate. This field cannot be set on a batch workload. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). Defaults to 1 hour if not set. If both ttl and idle_ttl are specified for an interactive session, the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idle_ttl or when ttl has been exceeded, whichever occurs first.
	IdleTtl pulumi.StringPtrInput `pulumi:"idleTtl"`
	// Optional. The Cloud KMS key to use for encryption.
	KmsKey pulumi.StringPtrInput `pulumi:"kmsKey"`
	// Optional. Tags used for network traffic control.
	NetworkTags pulumi.StringArrayInput `pulumi:"networkTags"`
	// Optional. Network URI to connect workload to.
	NetworkUri pulumi.StringPtrInput `pulumi:"networkUri"`
	// Optional. Service account used to execute the workload.
	ServiceAccount pulumi.StringPtrInput `pulumi:"serviceAccount"`
	// Optional. A Cloud Storage bucket used to stage workload dependencies, config files, and store workload output and other ephemeral data, such as Spark history files. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location according to the region where your workload is running, and then create and manage project-level, per-location staging and temporary buckets. This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.
	StagingBucket pulumi.StringPtrInput `pulumi:"stagingBucket"`
	// Optional. Subnetwork URI to connect workload to.
	SubnetworkUri pulumi.StringPtrInput `pulumi:"subnetworkUri"`
	// Optional. The duration after which the workload will be terminated, specified as the JSON representation for Duration (https://protobuf.dev/programming-guides/proto3/#json). When the workload exceeds this duration, it will be unconditionally terminated without waiting for ongoing work to finish. If ttl is not specified for a batch workload, the workload will be allowed to run until it exits naturally (or run forever without exiting). If ttl is not specified for an interactive session, it defaults to 24 hours. If ttl is not specified for a batch that uses 2.1+ runtime version, it defaults to 4 hours. Minimum value is 10 minutes; maximum value is 14 days. If both ttl and idle_ttl are specified (for an interactive session), the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idle_ttl or when ttl has been exceeded, whichever occurs first.
	Ttl pulumi.StringPtrInput `pulumi:"ttl"`
}

Execution configuration for a workload.
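
A sketch of an ExecutionConfigArgs that sets both termination timers for an interactive session. Duration values use the JSON Duration representation described in the field comments (seconds with an "s" suffix); the bucket, tag, and service account names are placeholders:

package main

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// sessionExecutionConfig terminates the session after 1 hour of idling or
// 8 hours of total runtime, whichever comes first (the limits are OR'd).
func sessionExecutionConfig() dataproc.ExecutionConfigPtrInput {
	return dataproc.ExecutionConfigArgs{
		IdleTtl:        pulumi.String("3600s"), // sessions only; not valid for batch workloads
		Ttl:            pulumi.String("28800s"),
		ServiceAccount: pulumi.String("workload-sa@my-project.iam.gserviceaccount.com"),
		NetworkTags:    pulumi.StringArray{pulumi.String("dataproc-serverless")},
		StagingBucket:  pulumi.String("my-staging-bucket"), // bucket name, not a gs:// URI
	}
}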

func (ExecutionConfigArgs) ElementType added in v0.12.0

func (ExecutionConfigArgs) ElementType() reflect.Type

func (ExecutionConfigArgs) ToExecutionConfigOutput added in v0.12.0

func (i ExecutionConfigArgs) ToExecutionConfigOutput() ExecutionConfigOutput

func (ExecutionConfigArgs) ToExecutionConfigOutputWithContext added in v0.12.0

func (i ExecutionConfigArgs) ToExecutionConfigOutputWithContext(ctx context.Context) ExecutionConfigOutput

func (ExecutionConfigArgs) ToExecutionConfigPtrOutput added in v0.12.0

func (i ExecutionConfigArgs) ToExecutionConfigPtrOutput() ExecutionConfigPtrOutput

func (ExecutionConfigArgs) ToExecutionConfigPtrOutputWithContext added in v0.12.0

func (i ExecutionConfigArgs) ToExecutionConfigPtrOutputWithContext(ctx context.Context) ExecutionConfigPtrOutput

type ExecutionConfigInput added in v0.12.0

type ExecutionConfigInput interface {
	pulumi.Input

	ToExecutionConfigOutput() ExecutionConfigOutput
	ToExecutionConfigOutputWithContext(context.Context) ExecutionConfigOutput
}

ExecutionConfigInput is an input type that accepts ExecutionConfigArgs and ExecutionConfigOutput values. You can construct a concrete instance of `ExecutionConfigInput` via:

ExecutionConfigArgs{...}

type ExecutionConfigOutput added in v0.12.0

type ExecutionConfigOutput struct{ *pulumi.OutputState }

Execution configuration for a workload.

func (ExecutionConfigOutput) ElementType added in v0.12.0

func (ExecutionConfigOutput) ElementType() reflect.Type

func (ExecutionConfigOutput) IdleTtl added in v0.24.0

Optional. Applies to sessions only. The duration to keep the session alive while it's idling. Exceeding this threshold causes the session to terminate. This field cannot be set on a batch workload. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). Defaults to 1 hour if not set. If both ttl and idle_ttl are specified for an interactive session, the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idle_ttl or when ttl has been exceeded, whichever occurs first.

func (ExecutionConfigOutput) KmsKey added in v0.12.0

Optional. The Cloud KMS key to use for encryption.

func (ExecutionConfigOutput) NetworkTags added in v0.12.0

Optional. Tags used for network traffic control.

func (ExecutionConfigOutput) NetworkUri added in v0.12.0

Optional. Network URI to connect workload to.

func (ExecutionConfigOutput) ServiceAccount added in v0.12.0

func (o ExecutionConfigOutput) ServiceAccount() pulumi.StringPtrOutput

Optional. Service account used to execute the workload.

func (ExecutionConfigOutput) StagingBucket added in v0.29.0

func (o ExecutionConfigOutput) StagingBucket() pulumi.StringPtrOutput

Optional. A Cloud Storage bucket used to stage workload dependencies, config files, and store workload output and other ephemeral data, such as Spark history files. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location according to the region where your workload is running, and then create and manage project-level, per-location staging and temporary buckets. This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.

func (ExecutionConfigOutput) SubnetworkUri added in v0.12.0

func (o ExecutionConfigOutput) SubnetworkUri() pulumi.StringPtrOutput

Optional. Subnetwork URI to connect workload to.

func (ExecutionConfigOutput) ToExecutionConfigOutput added in v0.12.0

func (o ExecutionConfigOutput) ToExecutionConfigOutput() ExecutionConfigOutput

func (ExecutionConfigOutput) ToExecutionConfigOutputWithContext added in v0.12.0

func (o ExecutionConfigOutput) ToExecutionConfigOutputWithContext(ctx context.Context) ExecutionConfigOutput

func (ExecutionConfigOutput) ToExecutionConfigPtrOutput added in v0.12.0

func (o ExecutionConfigOutput) ToExecutionConfigPtrOutput() ExecutionConfigPtrOutput

func (ExecutionConfigOutput) ToExecutionConfigPtrOutputWithContext added in v0.12.0

func (o ExecutionConfigOutput) ToExecutionConfigPtrOutputWithContext(ctx context.Context) ExecutionConfigPtrOutput

func (ExecutionConfigOutput) Ttl added in v0.29.0

Optional. The duration after which the workload will be terminated, specified as the JSON representation for Duration (https://protobuf.dev/programming-guides/proto3/#json). When the workload exceeds this duration, it will be unconditionally terminated without waiting for ongoing work to finish. If ttl is not specified for a batch workload, the workload will be allowed to run until it exits naturally (or run forever without exiting). If ttl is not specified for an interactive session, it defaults to 24 hours. If ttl is not specified for a batch that uses 2.1+ runtime version, it defaults to 4 hours. Minimum value is 10 minutes; maximum value is 14 days. If both ttl and idle_ttl are specified (for an interactive session), the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idle_ttl or when ttl has been exceeded, whichever occurs first.

type ExecutionConfigPtrInput added in v0.12.0

type ExecutionConfigPtrInput interface {
	pulumi.Input

	ToExecutionConfigPtrOutput() ExecutionConfigPtrOutput
	ToExecutionConfigPtrOutputWithContext(context.Context) ExecutionConfigPtrOutput
}

ExecutionConfigPtrInput is an input type that accepts ExecutionConfigArgs, ExecutionConfigPtr and ExecutionConfigPtrOutput values. You can construct a concrete instance of `ExecutionConfigPtrInput` via:

        ExecutionConfigArgs{...}

or:

        nil

func ExecutionConfigPtr added in v0.12.0

func ExecutionConfigPtr(v *ExecutionConfigArgs) ExecutionConfigPtrInput

type ExecutionConfigPtrOutput added in v0.12.0

type ExecutionConfigPtrOutput struct{ *pulumi.OutputState }

func (ExecutionConfigPtrOutput) Elem added in v0.12.0

func (ExecutionConfigPtrOutput) ElementType added in v0.12.0

func (ExecutionConfigPtrOutput) ElementType() reflect.Type

func (ExecutionConfigPtrOutput) IdleTtl added in v0.24.0

Optional. Applies to sessions only. The duration to keep the session alive while it's idling. Exceeding this threshold causes the session to terminate. This field cannot be set on a batch workload. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). Defaults to 1 hour if not set. If both ttl and idle_ttl are specified for an interactive session, the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idle_ttl or when ttl has been exceeded, whichever occurs first.

func (ExecutionConfigPtrOutput) KmsKey added in v0.12.0

Optional. The Cloud KMS key to use for encryption.

func (ExecutionConfigPtrOutput) NetworkTags added in v0.12.0

Optional. Tags used for network traffic control.

func (ExecutionConfigPtrOutput) NetworkUri added in v0.12.0

Optional. Network URI to connect workload to.

func (ExecutionConfigPtrOutput) ServiceAccount added in v0.12.0

func (o ExecutionConfigPtrOutput) ServiceAccount() pulumi.StringPtrOutput

Optional. Service account used to execute the workload.

func (ExecutionConfigPtrOutput) StagingBucket added in v0.29.0

Optional. A Cloud Storage bucket used to stage workload dependencies, config files, and store workload output and other ephemeral data, such as Spark history files. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location according to the region where your workload is running, and then create and manage project-level, per-location staging and temporary buckets. This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.

func (ExecutionConfigPtrOutput) SubnetworkUri added in v0.12.0

Optional. Subnetwork URI to connect workload to.

func (ExecutionConfigPtrOutput) ToExecutionConfigPtrOutput added in v0.12.0

func (o ExecutionConfigPtrOutput) ToExecutionConfigPtrOutput() ExecutionConfigPtrOutput

func (ExecutionConfigPtrOutput) ToExecutionConfigPtrOutputWithContext added in v0.12.0

func (o ExecutionConfigPtrOutput) ToExecutionConfigPtrOutputWithContext(ctx context.Context) ExecutionConfigPtrOutput

func (ExecutionConfigPtrOutput) Ttl added in v0.29.0

Optional. The duration after which the workload will be terminated, specified as the JSON representation for Duration (https://protobuf.dev/programming-guides/proto3/#json). When the workload exceeds this duration, it will be unconditionally terminated without waiting for ongoing work to finish. If ttl is not specified for a batch workload, the workload will be allowed to run until it exits naturally (or run forever without exiting). If ttl is not specified for an interactive session, it defaults to 24 hours. If ttl is not specified for a batch that uses 2.1+ runtime version, it defaults to 4 hours. Minimum value is 10 minutes; maximum value is 14 days. If both ttl and idle_ttl are specified (for an interactive session), the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idle_ttl or when ttl has been exceeded, whichever occurs first.

type ExecutionConfigResponse added in v0.12.0

type ExecutionConfigResponse struct {
	// Optional. Applies to sessions only. The duration to keep the session alive while it's idling. Exceeding this threshold causes the session to terminate. This field cannot be set on a batch workload. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). Defaults to 1 hour if not set. If both ttl and idle_ttl are specified for an interactive session, the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idle_ttl or when ttl has been exceeded, whichever occurs first.
	IdleTtl string `pulumi:"idleTtl"`
	// Optional. The Cloud KMS key to use for encryption.
	KmsKey string `pulumi:"kmsKey"`
	// Optional. Tags used for network traffic control.
	NetworkTags []string `pulumi:"networkTags"`
	// Optional. Network URI to connect workload to.
	NetworkUri string `pulumi:"networkUri"`
	// Optional. Service account used to execute the workload.
	ServiceAccount string `pulumi:"serviceAccount"`
	// Optional. A Cloud Storage bucket used to stage workload dependencies, config files, and store workload output and other ephemeral data, such as Spark history files. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location according to the region where your workload is running, and then create and manage project-level, per-location staging and temporary buckets. This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.
	StagingBucket string `pulumi:"stagingBucket"`
	// Optional. Subnetwork URI to connect workload to.
	SubnetworkUri string `pulumi:"subnetworkUri"`
	// Optional. The duration after which the workload will be terminated, specified as the JSON representation for Duration (https://protobuf.dev/programming-guides/proto3/#json). When the workload exceeds this duration, it will be unconditionally terminated without waiting for ongoing work to finish. If ttl is not specified for a batch workload, the workload will be allowed to run until it exits naturally (or run forever without exiting). If ttl is not specified for an interactive session, it defaults to 24 hours. If ttl is not specified for a batch that uses 2.1+ runtime version, it defaults to 4 hours. Minimum value is 10 minutes; maximum value is 14 days. If both ttl and idle_ttl are specified (for an interactive session), the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idle_ttl or when ttl has been exceeded, whichever occurs first.
	Ttl string `pulumi:"ttl"`
}

Execution configuration for a workload.

type ExecutionConfigResponseOutput added in v0.12.0

type ExecutionConfigResponseOutput struct{ *pulumi.OutputState }

Execution configuration for a workload.

func (ExecutionConfigResponseOutput) ElementType added in v0.12.0

func (ExecutionConfigResponseOutput) IdleTtl added in v0.24.0

Optional. Applies to sessions only. The duration to keep the session alive while it's idling. Exceeding this threshold causes the session to terminate. This field cannot be set on a batch workload. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). Defaults to 1 hour if not set. If both ttl and idle_ttl are specified for an interactive session, the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idle_ttl or when ttl has been exceeded, whichever occurs first.

func (ExecutionConfigResponseOutput) KmsKey added in v0.12.0

Optional. The Cloud KMS key to use for encryption.

func (ExecutionConfigResponseOutput) NetworkTags added in v0.12.0

Optional. Tags used for network traffic control.

func (ExecutionConfigResponseOutput) NetworkUri added in v0.12.0

Optional. Network URI to connect workload to.

func (ExecutionConfigResponseOutput) ServiceAccount added in v0.12.0

Optional. Service account used to execute the workload.

func (ExecutionConfigResponseOutput) StagingBucket added in v0.29.0

Optional. A Cloud Storage bucket used to stage workload dependencies, config files, and store workload output and other ephemeral data, such as Spark history files. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location according to the region where your workload is running, and then create and manage project-level, per-location staging and temporary buckets. This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.

func (ExecutionConfigResponseOutput) SubnetworkUri added in v0.12.0

Optional. Subnetwork URI to connect workload to.

func (ExecutionConfigResponseOutput) ToExecutionConfigResponseOutput added in v0.12.0

func (o ExecutionConfigResponseOutput) ToExecutionConfigResponseOutput() ExecutionConfigResponseOutput

func (ExecutionConfigResponseOutput) ToExecutionConfigResponseOutputWithContext added in v0.12.0

func (o ExecutionConfigResponseOutput) ToExecutionConfigResponseOutputWithContext(ctx context.Context) ExecutionConfigResponseOutput

func (ExecutionConfigResponseOutput) Ttl added in v0.29.0

Optional. The duration after which the workload will be terminated, specified as the JSON representation for Duration (https://protobuf.dev/programming-guides/proto3/#json). When the workload exceeds this duration, it will be unconditionally terminated without waiting for ongoing work to finish. If ttl is not specified for a batch workload, the workload will be allowed to run until it exits naturally (or run forever without exiting). If ttl is not specified for an interactive session, it defaults to 24 hours. If ttl is not specified for a batch that uses 2.1+ runtime version, it defaults to 4 hours. Minimum value is 10 minutes; maximum value is 14 days. If both ttl and idle_ttl are specified (for an interactive session), the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idle_ttl or when ttl has been exceeded, whichever occurs first.

type Expr

type Expr struct {
	// Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.
	Description *string `pulumi:"description"`
	// Textual representation of an expression in Common Expression Language syntax.
	Expression *string `pulumi:"expression"`
	// Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.
	Location *string `pulumi:"location"`
	// Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow entering the expression.
	Title *string `pulumi:"title"`
}

Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec.Example (Comparison): title: "Summary size limit" description: "Determines if a summary is less than 100 chars" expression: "document.summary.size() < 100" Example (Equality): title: "Requestor is owner" description: "Determines if requestor is the document owner" expression: "document.owner == request.auth.claims.email" Example (Logic): title: "Public documents" description: "Determine whether the document should be publicly visible" expression: "document.type != 'private' && document.type != 'internal'" Example (Data Manipulation): title: "Notification string" description: "Create a notification string with a timestamp." expression: "'New message received at ' + string(document.create_time)" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.

type ExprArgs

type ExprArgs struct {
	// Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.
	Description pulumi.StringPtrInput `pulumi:"description"`
	// Textual representation of an expression in Common Expression Language syntax.
	Expression pulumi.StringPtrInput `pulumi:"expression"`
	// Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.
	Location pulumi.StringPtrInput `pulumi:"location"`
	// Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow entering the expression.
	Title pulumi.StringPtrInput `pulumi:"title"`
}

Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec.Example (Comparison): title: "Summary size limit" description: "Determines if a summary is less than 100 chars" expression: "document.summary.size() < 100" Example (Equality): title: "Requestor is owner" description: "Determines if requestor is the document owner" expression: "document.owner == request.auth.claims.email" Example (Logic): title: "Public documents" description: "Determine whether the document should be publicly visible" expression: "document.type != 'private' && document.type != 'internal'" Example (Data Manipulation): title: "Notification string" description: "Create a notification string with a timestamp." expression: "'New message received at ' + string(document.create_time)" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.
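
A brief sketch that expresses the "Public documents" CEL example above as an ExprArgs value; the import paths are assumed, and the expression text is taken verbatim from the description:

package main

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// publicDocsCondition builds a CEL condition; ExprArgs also satisfies
// ExprPtrInput, so it can be assigned to optional condition fields directly.
func publicDocsCondition() dataproc.ExprPtrInput {
	return dataproc.ExprArgs{
		Title:       pulumi.String("Public documents"),
		Description: pulumi.String("Determine whether the document should be publicly visible"),
		Expression:  pulumi.String("document.type != 'private' && document.type != 'internal'"),
	}
}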

func (ExprArgs) ElementType

func (ExprArgs) ElementType() reflect.Type

func (ExprArgs) ToExprOutput

func (i ExprArgs) ToExprOutput() ExprOutput

func (ExprArgs) ToExprOutputWithContext

func (i ExprArgs) ToExprOutputWithContext(ctx context.Context) ExprOutput

func (ExprArgs) ToExprPtrOutput

func (i ExprArgs) ToExprPtrOutput() ExprPtrOutput

func (ExprArgs) ToExprPtrOutputWithContext

func (i ExprArgs) ToExprPtrOutputWithContext(ctx context.Context) ExprPtrOutput

type ExprInput

type ExprInput interface {
	pulumi.Input

	ToExprOutput() ExprOutput
	ToExprOutputWithContext(context.Context) ExprOutput
}

ExprInput is an input type that accepts ExprArgs and ExprOutput values. You can construct a concrete instance of `ExprInput` via:

ExprArgs{...}

type ExprOutput

type ExprOutput struct{ *pulumi.OutputState }

Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec.Example (Comparison): title: "Summary size limit" description: "Determines if a summary is less than 100 chars" expression: "document.summary.size() < 100" Example (Equality): title: "Requestor is owner" description: "Determines if requestor is the document owner" expression: "document.owner == request.auth.claims.email" Example (Logic): title: "Public documents" description: "Determine whether the document should be publicly visible" expression: "document.type != 'private' && document.type != 'internal'" Example (Data Manipulation): title: "Notification string" description: "Create a notification string with a timestamp." expression: "'New message received at ' + string(document.create_time)" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.

func (ExprOutput) Description

func (o ExprOutput) Description() pulumi.StringPtrOutput

Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.

func (ExprOutput) ElementType

func (ExprOutput) ElementType() reflect.Type

func (ExprOutput) Expression

func (o ExprOutput) Expression() pulumi.StringPtrOutput

Textual representation of an expression in Common Expression Language syntax.

func (ExprOutput) Location

func (o ExprOutput) Location() pulumi.StringPtrOutput

Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.

func (ExprOutput) Title

func (o ExprOutput) Title() pulumi.StringPtrOutput

Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow entering the expression.

func (ExprOutput) ToExprOutput

func (o ExprOutput) ToExprOutput() ExprOutput

func (ExprOutput) ToExprOutputWithContext

func (o ExprOutput) ToExprOutputWithContext(ctx context.Context) ExprOutput

func (ExprOutput) ToExprPtrOutput

func (o ExprOutput) ToExprPtrOutput() ExprPtrOutput

func (ExprOutput) ToExprPtrOutputWithContext

func (o ExprOutput) ToExprPtrOutputWithContext(ctx context.Context) ExprPtrOutput

type ExprPtrInput

type ExprPtrInput interface {
	pulumi.Input

	ToExprPtrOutput() ExprPtrOutput
	ToExprPtrOutputWithContext(context.Context) ExprPtrOutput
}

ExprPtrInput is an input type that accepts ExprArgs, ExprPtr and ExprPtrOutput values. You can construct a concrete instance of `ExprPtrInput` via:

        ExprArgs{...}

or:

        nil

func ExprPtr

func ExprPtr(v *ExprArgs) ExprPtrInput

type ExprPtrOutput

type ExprPtrOutput struct{ *pulumi.OutputState }

func (ExprPtrOutput) Description

func (o ExprPtrOutput) Description() pulumi.StringPtrOutput

Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.

func (ExprPtrOutput) Elem

func (o ExprPtrOutput) Elem() ExprOutput

func (ExprPtrOutput) ElementType

func (ExprPtrOutput) ElementType() reflect.Type

func (ExprPtrOutput) Expression

func (o ExprPtrOutput) Expression() pulumi.StringPtrOutput

Textual representation of an expression in Common Expression Language syntax.

func (ExprPtrOutput) Location

func (o ExprPtrOutput) Location() pulumi.StringPtrOutput

Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.

func (ExprPtrOutput) Title

Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow entering the expression.

func (ExprPtrOutput) ToExprPtrOutput

func (o ExprPtrOutput) ToExprPtrOutput() ExprPtrOutput

func (ExprPtrOutput) ToExprPtrOutputWithContext

func (o ExprPtrOutput) ToExprPtrOutputWithContext(ctx context.Context) ExprPtrOutput

type ExprResponse

type ExprResponse struct {
	// Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.
	Description string `pulumi:"description"`
	// Textual representation of an expression in Common Expression Language syntax.
	Expression string `pulumi:"expression"`
	// Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.
	Location string `pulumi:"location"`
	// Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow entering the expression.
	Title string `pulumi:"title"`
}

Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec.Example (Comparison): title: "Summary size limit" description: "Determines if a summary is less than 100 chars" expression: "document.summary.size() < 100" Example (Equality): title: "Requestor is owner" description: "Determines if requestor is the document owner" expression: "document.owner == request.auth.claims.email" Example (Logic): title: "Public documents" description: "Determine whether the document should be publicly visible" expression: "document.type != 'private' && document.type != 'internal'" Example (Data Manipulation): title: "Notification string" description: "Create a notification string with a timestamp." expression: "'New message received at ' + string(document.create_time)" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.

type ExprResponseOutput

type ExprResponseOutput struct{ *pulumi.OutputState }

Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec.Example (Comparison): title: "Summary size limit" description: "Determines if a summary is less than 100 chars" expression: "document.summary.size() < 100" Example (Equality): title: "Requestor is owner" description: "Determines if requestor is the document owner" expression: "document.owner == request.auth.claims.email" Example (Logic): title: "Public documents" description: "Determine whether the document should be publicly visible" expression: "document.type != 'private' && document.type != 'internal'" Example (Data Manipulation): title: "Notification string" description: "Create a notification string with a timestamp." expression: "'New message received at ' + string(document.create_time)" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.

func (ExprResponseOutput) Description

func (o ExprResponseOutput) Description() pulumi.StringOutput

Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.

func (ExprResponseOutput) ElementType

func (ExprResponseOutput) ElementType() reflect.Type

func (ExprResponseOutput) Expression

func (o ExprResponseOutput) Expression() pulumi.StringOutput

Textual representation of an expression in Common Expression Language syntax.

func (ExprResponseOutput) Location

func (o ExprResponseOutput) Location() pulumi.StringOutput

Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.

func (ExprResponseOutput) Title

Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow entering the expression.

func (ExprResponseOutput) ToExprResponseOutput

func (o ExprResponseOutput) ToExprResponseOutput() ExprResponseOutput

func (ExprResponseOutput) ToExprResponseOutputWithContext

func (o ExprResponseOutput) ToExprResponseOutputWithContext(ctx context.Context) ExprResponseOutput

type FlinkJob added in v0.32.0

type FlinkJob struct {
	// Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision might occur that causes an incorrect job submission.
	Args []string `pulumi:"args"`
	// Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Flink driver and tasks.
	JarFileUris []string `pulumi:"jarFileUris"`
	// Optional. The runtime log config for job execution.
	LoggingConfig *LoggingConfig `pulumi:"loggingConfig"`
	// The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jarFileUris.
	MainClass *string `pulumi:"mainClass"`
	// The HCFS URI of the jar file that contains the main class.
	MainJarFileUri *string `pulumi:"mainJarFileUri"`
	// Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/flink/conf/flink-defaults.conf and classes in user code.
	Properties map[string]string `pulumi:"properties"`
	// Optional. HCFS URI of the savepoint, which contains the last saved progress for starting the current job.
	SavepointUri *string `pulumi:"savepointUri"`
}

A Dataproc job for running Apache Flink applications on YARN.

type FlinkJobArgs added in v0.32.0

type FlinkJobArgs struct {
	// Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision might occur that causes an incorrect job submission.
	Args pulumi.StringArrayInput `pulumi:"args"`
	// Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Flink driver and tasks.
	JarFileUris pulumi.StringArrayInput `pulumi:"jarFileUris"`
	// Optional. The runtime log config for job execution.
	LoggingConfig LoggingConfigPtrInput `pulumi:"loggingConfig"`
	// The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jarFileUris.
	MainClass pulumi.StringPtrInput `pulumi:"mainClass"`
	// The HCFS URI of the jar file that contains the main class.
	MainJarFileUri pulumi.StringPtrInput `pulumi:"mainJarFileUri"`
	// Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/flink/conf/flink-defaults.conf and classes in user code.
	Properties pulumi.StringMapInput `pulumi:"properties"`
	// Optional. HCFS URI of the savepoint, which contains the last saved progress for starting the current job.
	SavepointUri pulumi.StringPtrInput `pulumi:"savepointUri"`
}

A Dataproc job for running Apache Flink applications on YARN.
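
A sketch of a FlinkJobArgs driven by a jar in Cloud Storage. The bucket paths and the parallelism property are placeholders; MainClass and MainJarFileUri are alternative ways to name the driver, so only one is set here:

package main

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// flinkWordCount describes a Flink job that resumes from a savepoint and
// passes its input path as a driver argument rather than a property.
func flinkWordCount() dataproc.FlinkJobPtrInput {
	return dataproc.FlinkJobArgs{
		MainJarFileUri: pulumi.String("gs://my-bucket/jobs/flink-wordcount.jar"),
		Args: pulumi.StringArray{
			pulumi.String("--input"),
			pulumi.String("gs://my-bucket/data/input.txt"),
		},
		Properties: pulumi.StringMap{
			"parallelism.default": pulumi.String("4"),
		},
		SavepointUri: pulumi.String("gs://my-bucket/savepoints/wordcount"),
	}
}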

func (FlinkJobArgs) ElementType added in v0.32.0

func (FlinkJobArgs) ElementType() reflect.Type

func (FlinkJobArgs) ToFlinkJobOutput added in v0.32.0

func (i FlinkJobArgs) ToFlinkJobOutput() FlinkJobOutput

func (FlinkJobArgs) ToFlinkJobOutputWithContext added in v0.32.0

func (i FlinkJobArgs) ToFlinkJobOutputWithContext(ctx context.Context) FlinkJobOutput

func (FlinkJobArgs) ToFlinkJobPtrOutput added in v0.32.0

func (i FlinkJobArgs) ToFlinkJobPtrOutput() FlinkJobPtrOutput

func (FlinkJobArgs) ToFlinkJobPtrOutputWithContext added in v0.32.0

func (i FlinkJobArgs) ToFlinkJobPtrOutputWithContext(ctx context.Context) FlinkJobPtrOutput

type FlinkJobInput added in v0.32.0

type FlinkJobInput interface {
	pulumi.Input

	ToFlinkJobOutput() FlinkJobOutput
	ToFlinkJobOutputWithContext(context.Context) FlinkJobOutput
}

FlinkJobInput is an input type that accepts FlinkJobArgs and FlinkJobOutput values. You can construct a concrete instance of `FlinkJobInput` via:

FlinkJobArgs{...}

type FlinkJobOutput added in v0.32.0

type FlinkJobOutput struct{ *pulumi.OutputState }

A Dataproc job for running Apache Flink applications on YARN.

func (FlinkJobOutput) Args added in v0.32.0

Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision might occur that causes an incorrect job submission.

func (FlinkJobOutput) ElementType added in v0.32.0

func (FlinkJobOutput) ElementType() reflect.Type

func (FlinkJobOutput) JarFileUris added in v0.32.0

func (o FlinkJobOutput) JarFileUris() pulumi.StringArrayOutput

Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Flink driver and tasks.

func (FlinkJobOutput) LoggingConfig added in v0.32.0

func (o FlinkJobOutput) LoggingConfig() LoggingConfigPtrOutput

Optional. The runtime log config for job execution.

func (FlinkJobOutput) MainClass added in v0.32.0

func (o FlinkJobOutput) MainClass() pulumi.StringPtrOutput

The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jarFileUris.

func (FlinkJobOutput) MainJarFileUri added in v0.32.0

func (o FlinkJobOutput) MainJarFileUri() pulumi.StringPtrOutput

The HCFS URI of the jar file that contains the main class.

func (FlinkJobOutput) Properties added in v0.32.0

func (o FlinkJobOutput) Properties() pulumi.StringMapOutput

Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/flink/conf/flink-defaults.conf and classes in user code.

func (FlinkJobOutput) SavepointUri added in v0.32.0

func (o FlinkJobOutput) SavepointUri() pulumi.StringPtrOutput

Optional. HCFS URI of the savepoint, which contains the last saved progress for starting the current job.

func (FlinkJobOutput) ToFlinkJobOutput added in v0.32.0

func (o FlinkJobOutput) ToFlinkJobOutput() FlinkJobOutput

func (FlinkJobOutput) ToFlinkJobOutputWithContext added in v0.32.0

func (o FlinkJobOutput) ToFlinkJobOutputWithContext(ctx context.Context) FlinkJobOutput

func (FlinkJobOutput) ToFlinkJobPtrOutput added in v0.32.0

func (o FlinkJobOutput) ToFlinkJobPtrOutput() FlinkJobPtrOutput

func (FlinkJobOutput) ToFlinkJobPtrOutputWithContext added in v0.32.0

func (o FlinkJobOutput) ToFlinkJobPtrOutputWithContext(ctx context.Context) FlinkJobPtrOutput

type FlinkJobPtrInput added in v0.32.0

type FlinkJobPtrInput interface {
	pulumi.Input

	ToFlinkJobPtrOutput() FlinkJobPtrOutput
	ToFlinkJobPtrOutputWithContext(context.Context) FlinkJobPtrOutput
}

FlinkJobPtrInput is an input type that accepts FlinkJobArgs, FlinkJobPtr and FlinkJobPtrOutput values. You can construct a concrete instance of `FlinkJobPtrInput` via:

        FlinkJobArgs{...}

or:

        nil

func FlinkJobPtr added in v0.32.0

func FlinkJobPtr(v *FlinkJobArgs) FlinkJobPtrInput

type FlinkJobPtrOutput added in v0.32.0

type FlinkJobPtrOutput struct{ *pulumi.OutputState }

func (FlinkJobPtrOutput) Args added in v0.32.0

Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision might occur that causes an incorrect job submission.

func (FlinkJobPtrOutput) Elem added in v0.32.0

func (FlinkJobPtrOutput) ElementType added in v0.32.0

func (FlinkJobPtrOutput) ElementType() reflect.Type

func (FlinkJobPtrOutput) JarFileUris added in v0.32.0

func (o FlinkJobPtrOutput) JarFileUris() pulumi.StringArrayOutput

Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Flink driver and tasks.

func (FlinkJobPtrOutput) LoggingConfig added in v0.32.0

func (o FlinkJobPtrOutput) LoggingConfig() LoggingConfigPtrOutput

Optional. The runtime log config for job execution.

func (FlinkJobPtrOutput) MainClass added in v0.32.0

The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jarFileUris.

func (FlinkJobPtrOutput) MainJarFileUri added in v0.32.0

func (o FlinkJobPtrOutput) MainJarFileUri() pulumi.StringPtrOutput

The HCFS URI of the jar file that contains the main class.

func (FlinkJobPtrOutput) Properties added in v0.32.0

func (o FlinkJobPtrOutput) Properties() pulumi.StringMapOutput

Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/flink/conf/flink-defaults.conf and classes in user code.

func (FlinkJobPtrOutput) SavepointUri added in v0.32.0

func (o FlinkJobPtrOutput) SavepointUri() pulumi.StringPtrOutput

Optional. HCFS URI of the savepoint, which contains the last saved progress for starting the current job.

func (FlinkJobPtrOutput) ToFlinkJobPtrOutput added in v0.32.0

func (o FlinkJobPtrOutput) ToFlinkJobPtrOutput() FlinkJobPtrOutput

func (FlinkJobPtrOutput) ToFlinkJobPtrOutputWithContext added in v0.32.0

func (o FlinkJobPtrOutput) ToFlinkJobPtrOutputWithContext(ctx context.Context) FlinkJobPtrOutput

type FlinkJobResponse added in v0.32.0

type FlinkJobResponse struct {
	// Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision might occur that causes an incorrect job submission.
	Args []string `pulumi:"args"`
	// Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Flink driver and tasks.
	JarFileUris []string `pulumi:"jarFileUris"`
	// Optional. The runtime log config for job execution.
	LoggingConfig LoggingConfigResponse `pulumi:"loggingConfig"`
	// The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jarFileUris.
	MainClass string `pulumi:"mainClass"`
	// The HCFS URI of the jar file that contains the main class.
	MainJarFileUri string `pulumi:"mainJarFileUri"`
	// Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/flink/conf/flink-defaults.conf and classes in user code.
	Properties map[string]string `pulumi:"properties"`
	// Optional. HCFS URI of the savepoint, which contains the last saved progress for starting the current job.
	SavepointUri string `pulumi:"savepointUri"`
}

A Dataproc job for running Apache Flink applications on YARN.

type FlinkJobResponseOutput added in v0.32.0

type FlinkJobResponseOutput struct{ *pulumi.OutputState }

A Dataproc job for running Apache Flink applications on YARN.

func (FlinkJobResponseOutput) Args added in v0.32.0

Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision might occur that causes an incorrect job submission.

func (FlinkJobResponseOutput) ElementType added in v0.32.0

func (FlinkJobResponseOutput) ElementType() reflect.Type

func (FlinkJobResponseOutput) JarFileUris added in v0.32.0

Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Flink driver and tasks.

func (FlinkJobResponseOutput) LoggingConfig added in v0.32.0

Optional. The runtime log config for job execution.

func (FlinkJobResponseOutput) MainClass added in v0.32.0

The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jarFileUris.

func (FlinkJobResponseOutput) MainJarFileUri added in v0.32.0

func (o FlinkJobResponseOutput) MainJarFileUri() pulumi.StringOutput

The HCFS URI of the jar file that contains the main class.

func (FlinkJobResponseOutput) Properties added in v0.32.0

Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/flink/conf/flink-defaults.conf and classes in user code.

func (FlinkJobResponseOutput) SavepointUri added in v0.32.0

func (o FlinkJobResponseOutput) SavepointUri() pulumi.StringOutput

Optional. HCFS URI of the savepoint, which contains the last saved progress for starting the current job.

func (FlinkJobResponseOutput) ToFlinkJobResponseOutput added in v0.32.0

func (o FlinkJobResponseOutput) ToFlinkJobResponseOutput() FlinkJobResponseOutput

func (FlinkJobResponseOutput) ToFlinkJobResponseOutputWithContext added in v0.32.0

func (o FlinkJobResponseOutput) ToFlinkJobResponseOutputWithContext(ctx context.Context) FlinkJobResponseOutput

type GceClusterConfig

type GceClusterConfig struct {
	// Optional. Confidential Instance Config for clusters using Confidential VMs (https://cloud.google.com/compute/confidential-vm/docs).
	ConfidentialInstanceConfig *ConfidentialInstanceConfig `pulumi:"confidentialInstanceConfig"`
	// Optional. If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This internal_ip_only restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses.
	InternalIpOnly *bool `pulumi:"internalIpOnly"`
	// Optional. The Compute Engine metadata entries to add to all instances (see Project and instance metadata (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
	Metadata map[string]string `pulumi:"metadata"`
	// Optional. The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither network_uri nor subnetwork_uri is specified, the "default" network of the project is used, if it exists. Cannot be a "Custom Subnet Network" (see Using Subnetworks (https://cloud.google.com/compute/docs/subnetworks) for more information).A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/global/networks/default projects/[project_id]/global/networks/default default
	NetworkUri *string `pulumi:"networkUri"`
	// Optional. Node Group Affinity for sole-tenant clusters.
	NodeGroupAffinity *NodeGroupAffinity `pulumi:"nodeGroupAffinity"`
	// Optional. The type of IPv6 access for a cluster.
	PrivateIpv6GoogleAccess *GceClusterConfigPrivateIpv6GoogleAccess `pulumi:"privateIpv6GoogleAccess"`
	// Optional. Reservation Affinity for consuming Zonal reservation.
	ReservationAffinity *ReservationAffinity `pulumi:"reservationAffinity"`
	// Optional. The Dataproc service account (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc) (also see VM Data Plane identity (https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) used by Dataproc cluster VM instances to access Google Cloud Platform services. If not specified, the Compute Engine default service account (https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used.
	ServiceAccount *string `pulumi:"serviceAccount"`
	// Optional. The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is always included: https://www.googleapis.com/auth/cloud.useraccounts.readonly https://www.googleapis.com/auth/devstorage.read_write https://www.googleapis.com/auth/logging.write. If no scopes are specified, the following defaults are also provided: https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/bigtable.admin.table https://www.googleapis.com/auth/bigtable.data https://www.googleapis.com/auth/devstorage.full_control
	ServiceAccountScopes []string `pulumi:"serviceAccountScopes"`
	// Optional. Shielded Instance Config for clusters using Compute Engine Shielded VMs (https://cloud.google.com/security/shielded-cloud/shielded-vm).
	ShieldedInstanceConfig *ShieldedInstanceConfig `pulumi:"shieldedInstanceConfig"`
	// Optional. The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri.A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/regions/[region]/subnetworks/sub0 projects/[project_id]/regions/[region]/subnetworks/sub0 sub0
	SubnetworkUri *string `pulumi:"subnetworkUri"`
	// The Compute Engine tags to add to all instances (see Tagging instances (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)).
	Tags []string `pulumi:"tags"`
	// Optional. The Compute Engine zone where the Dataproc cluster will be located. If omitted, the service will pick a zone in the cluster's Compute Engine region. On a get request, zone will always be present.A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone] projects/[project_id]/zones/[zone] [zone]
	ZoneUri *string `pulumi:"zoneUri"`
}

Common config settings for resources of Compute Engine cluster instances, applicable to all instances in the cluster.

type GceClusterConfigArgs

type GceClusterConfigArgs struct {
	// Optional. Confidential Instance Config for clusters using Confidential VMs (https://cloud.google.com/compute/confidential-vm/docs).
	ConfidentialInstanceConfig ConfidentialInstanceConfigPtrInput `pulumi:"confidentialInstanceConfig"`
	// Optional. If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This internal_ip_only restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses.
	InternalIpOnly pulumi.BoolPtrInput `pulumi:"internalIpOnly"`
	// Optional. The Compute Engine metadata entries to add to all instances (see Project and instance metadata (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
	Metadata pulumi.StringMapInput `pulumi:"metadata"`
	// Optional. The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither network_uri nor subnetwork_uri is specified, the "default" network of the project is used, if it exists. Cannot be a "Custom Subnet Network" (see Using Subnetworks (https://cloud.google.com/compute/docs/subnetworks) for more information).A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/global/networks/default projects/[project_id]/global/networks/default default
	NetworkUri pulumi.StringPtrInput `pulumi:"networkUri"`
	// Optional. Node Group Affinity for sole-tenant clusters.
	NodeGroupAffinity NodeGroupAffinityPtrInput `pulumi:"nodeGroupAffinity"`
	// Optional. The type of IPv6 access for a cluster.
	PrivateIpv6GoogleAccess GceClusterConfigPrivateIpv6GoogleAccessPtrInput `pulumi:"privateIpv6GoogleAccess"`
	// Optional. Reservation Affinity for consuming Zonal reservation.
	ReservationAffinity ReservationAffinityPtrInput `pulumi:"reservationAffinity"`
	// Optional. The Dataproc service account (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc) (also see VM Data Plane identity (https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) used by Dataproc cluster VM instances to access Google Cloud Platform services. If not specified, the Compute Engine default service account (https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used.
	ServiceAccount pulumi.StringPtrInput `pulumi:"serviceAccount"`
	// Optional. The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is always included: https://www.googleapis.com/auth/cloud.useraccounts.readonly https://www.googleapis.com/auth/devstorage.read_write https://www.googleapis.com/auth/logging.write. If no scopes are specified, the following defaults are also provided: https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/bigtable.admin.table https://www.googleapis.com/auth/bigtable.data https://www.googleapis.com/auth/devstorage.full_control
	ServiceAccountScopes pulumi.StringArrayInput `pulumi:"serviceAccountScopes"`
	// Optional. Shielded Instance Config for clusters using Compute Engine Shielded VMs (https://cloud.google.com/security/shielded-cloud/shielded-vm).
	ShieldedInstanceConfig ShieldedInstanceConfigPtrInput `pulumi:"shieldedInstanceConfig"`
	// Optional. The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/regions/[region]/subnetworks/sub0 projects/[project_id]/regions/[region]/subnetworks/sub0 sub0
	SubnetworkUri pulumi.StringPtrInput `pulumi:"subnetworkUri"`
	// The Compute Engine tags to add to all instances (see Tagging instances (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)).
	Tags pulumi.StringArrayInput `pulumi:"tags"`
	// Optional. The Compute Engine zone where the Dataproc cluster will be located. If omitted, the service will pick a zone in the cluster's Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone] projects/[project_id]/zones/[zone] [zone]
	ZoneUri pulumi.StringPtrInput `pulumi:"zoneUri"`
}

Common config settings for resources of Compute Engine cluster instances, applicable to all instances in the cluster.
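For orientation, a minimal sketch of constructing GceClusterConfigArgs in a Pulumi Go program follows. The import path and alias, the subnetwork placeholder, and the newGceClusterConfig helper are assumptions for illustration; only the field names and their input types come from the struct above.

package example

import (
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"

	// Import path is assumed; adjust it to the module and version you actually use.
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1"
)

// newGceClusterConfig builds a GceClusterConfigArgs value. Because the Args type
// also implements GceClusterConfigPtrInput, the same literal can be assigned to
// any field that expects the pointer input (the consuming cluster resource is
// not shown here).
func newGceClusterConfig() dataproc.GceClusterConfigPtrInput {
	return dataproc.GceClusterConfigArgs{
		// Restrict every cluster VM to internal IP addresses.
		InternalIpOnly: pulumi.Bool(true),
		// A short name is accepted alongside full or partial subnetwork URIs.
		SubnetworkUri: pulumi.String("sub0"),
		// Scopes and tags apply to all instances in the cluster.
		ServiceAccountScopes: pulumi.StringArray{
			pulumi.String("https://www.googleapis.com/auth/cloud-platform"),
		},
		Tags: pulumi.StringArray{pulumi.String("dataproc")},
		// Inherit private IPv6 Google access from the subnetwork configuration.
		PrivateIpv6GoogleAccess: dataproc.GceClusterConfigPrivateIpv6GoogleAccessInheritFromSubnetwork,
	}
}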

func (GceClusterConfigArgs) ElementType

func (GceClusterConfigArgs) ElementType() reflect.Type

func (GceClusterConfigArgs) ToGceClusterConfigOutput

func (i GceClusterConfigArgs) ToGceClusterConfigOutput() GceClusterConfigOutput

func (GceClusterConfigArgs) ToGceClusterConfigOutputWithContext

func (i GceClusterConfigArgs) ToGceClusterConfigOutputWithContext(ctx context.Context) GceClusterConfigOutput

func (GceClusterConfigArgs) ToGceClusterConfigPtrOutput

func (i GceClusterConfigArgs) ToGceClusterConfigPtrOutput() GceClusterConfigPtrOutput

func (GceClusterConfigArgs) ToGceClusterConfigPtrOutputWithContext

func (i GceClusterConfigArgs) ToGceClusterConfigPtrOutputWithContext(ctx context.Context) GceClusterConfigPtrOutput

type GceClusterConfigInput

type GceClusterConfigInput interface {
	pulumi.Input

	ToGceClusterConfigOutput() GceClusterConfigOutput
	ToGceClusterConfigOutputWithContext(context.Context) GceClusterConfigOutput
}

GceClusterConfigInput is an input type that accepts GceClusterConfigArgs and GceClusterConfigOutput values. You can construct a concrete instance of `GceClusterConfigInput` via:

GceClusterConfigArgs{...}

type GceClusterConfigOutput

type GceClusterConfigOutput struct{ *pulumi.OutputState }

Common config settings for resources of Compute Engine cluster instances, applicable to all instances in the cluster.

func (GceClusterConfigOutput) ConfidentialInstanceConfig

func (o GceClusterConfigOutput) ConfidentialInstanceConfig() ConfidentialInstanceConfigPtrOutput

Optional. Confidential Instance Config for clusters using Confidential VMs (https://cloud.google.com/compute/confidential-vm/docs).

func (GceClusterConfigOutput) ElementType

func (GceClusterConfigOutput) ElementType() reflect.Type

func (GceClusterConfigOutput) InternalIpOnly

func (o GceClusterConfigOutput) InternalIpOnly() pulumi.BoolPtrOutput

Optional. If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This internal_ip_only restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses.

func (GceClusterConfigOutput) Metadata

Optional. The Compute Engine metadata entries to add to all instances (see Project and instance metadata (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).

func (GceClusterConfigOutput) NetworkUri

Optional. The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither network_uri nor subnetwork_uri is specified, the "default" network of the project is used, if it exists. Cannot be a "Custom Subnet Network" (see Using Subnetworks (https://cloud.google.com/compute/docs/subnetworks) for more information). A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/global/networks/default projects/[project_id]/global/networks/default default

func (GceClusterConfigOutput) NodeGroupAffinity

func (o GceClusterConfigOutput) NodeGroupAffinity() NodeGroupAffinityPtrOutput

Optional. Node Group Affinity for sole-tenant clusters.

func (GceClusterConfigOutput) PrivateIpv6GoogleAccess

Optional. The type of IPv6 access for a cluster.

func (GceClusterConfigOutput) ReservationAffinity

func (o GceClusterConfigOutput) ReservationAffinity() ReservationAffinityPtrOutput

Optional. Reservation Affinity for consuming Zonal reservation.

func (GceClusterConfigOutput) ServiceAccount

func (o GceClusterConfigOutput) ServiceAccount() pulumi.StringPtrOutput

Optional. The Dataproc service account (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc) (also see VM Data Plane identity (https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) used by Dataproc cluster VM instances to access Google Cloud Platform services. If not specified, the Compute Engine default service account (https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used.

func (GceClusterConfigOutput) ServiceAccountScopes

func (o GceClusterConfigOutput) ServiceAccountScopes() pulumi.StringArrayOutput

Optional. The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is always included: https://www.googleapis.com/auth/cloud.useraccounts.readonly https://www.googleapis.com/auth/devstorage.read_write https://www.googleapis.com/auth/logging.write. If no scopes are specified, the following defaults are also provided: https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/bigtable.admin.table https://www.googleapis.com/auth/bigtable.data https://www.googleapis.com/auth/devstorage.full_control

func (GceClusterConfigOutput) ShieldedInstanceConfig

func (o GceClusterConfigOutput) ShieldedInstanceConfig() ShieldedInstanceConfigPtrOutput

Optional. Shielded Instance Config for clusters using Compute Engine Shielded VMs (https://cloud.google.com/security/shielded-cloud/shielded-vm).

func (GceClusterConfigOutput) SubnetworkUri

func (o GceClusterConfigOutput) SubnetworkUri() pulumi.StringPtrOutput

Optional. The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/regions/[region]/subnetworks/sub0 projects/[project_id]/regions/[region]/subnetworks/sub0 sub0

func (GceClusterConfigOutput) Tags

The Compute Engine tags to add to all instances (see Tagging instances (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)).

func (GceClusterConfigOutput) ToGceClusterConfigOutput

func (o GceClusterConfigOutput) ToGceClusterConfigOutput() GceClusterConfigOutput

func (GceClusterConfigOutput) ToGceClusterConfigOutputWithContext

func (o GceClusterConfigOutput) ToGceClusterConfigOutputWithContext(ctx context.Context) GceClusterConfigOutput

func (GceClusterConfigOutput) ToGceClusterConfigPtrOutput

func (o GceClusterConfigOutput) ToGceClusterConfigPtrOutput() GceClusterConfigPtrOutput

func (GceClusterConfigOutput) ToGceClusterConfigPtrOutputWithContext

func (o GceClusterConfigOutput) ToGceClusterConfigPtrOutputWithContext(ctx context.Context) GceClusterConfigPtrOutput

func (GceClusterConfigOutput) ZoneUri

Optional. The Compute Engine zone where the Dataproc cluster will be located. If omitted, the service will pick a zone in the cluster's Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone] projects/[project_id]/zones/[zone] [zone]

type GceClusterConfigPrivateIpv6GoogleAccess added in v0.4.0

type GceClusterConfigPrivateIpv6GoogleAccess string

Optional. The type of IPv6 access for a cluster.
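As a brief, assumed usage sketch (reusing the dataproc import alias from the example above): either an enum constant or the GceClusterConfigPrivateIpv6GoogleAccessPtr helper shown below satisfies GceClusterConfigPrivateIpv6GoogleAccessPtrInput, so both forms can be assigned to the PrivateIpv6GoogleAccess field of GceClusterConfigArgs.

// Enum constant used directly as a pointer input.
var outboundAccess dataproc.GceClusterConfigPrivateIpv6GoogleAccessPtrInput = dataproc.GceClusterConfigPrivateIpv6GoogleAccessOutbound

// Equivalent form built from a raw string via the Ptr helper.
var inheritedAccess = dataproc.GceClusterConfigPrivateIpv6GoogleAccessPtr("INHERIT_FROM_SUBNETWORK")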

func (GceClusterConfigPrivateIpv6GoogleAccess) ElementType added in v0.4.0

func (GceClusterConfigPrivateIpv6GoogleAccess) ToGceClusterConfigPrivateIpv6GoogleAccessOutput added in v0.6.0

func (e GceClusterConfigPrivateIpv6GoogleAccess) ToGceClusterConfigPrivateIpv6GoogleAccessOutput() GceClusterConfigPrivateIpv6GoogleAccessOutput

func (GceClusterConfigPrivateIpv6GoogleAccess) ToGceClusterConfigPrivateIpv6GoogleAccessOutputWithContext added in v0.6.0

func (e GceClusterConfigPrivateIpv6GoogleAccess) ToGceClusterConfigPrivateIpv6GoogleAccessOutputWithContext(ctx context.Context) GceClusterConfigPrivateIpv6GoogleAccessOutput

func (GceClusterConfigPrivateIpv6GoogleAccess) ToGceClusterConfigPrivateIpv6GoogleAccessPtrOutput added in v0.6.0

func (e GceClusterConfigPrivateIpv6GoogleAccess) ToGceClusterConfigPrivateIpv6GoogleAccessPtrOutput() GceClusterConfigPrivateIpv6GoogleAccessPtrOutput

func (GceClusterConfigPrivateIpv6GoogleAccess) ToGceClusterConfigPrivateIpv6GoogleAccessPtrOutputWithContext added in v0.6.0

func (e GceClusterConfigPrivateIpv6GoogleAccess) ToGceClusterConfigPrivateIpv6GoogleAccessPtrOutputWithContext(ctx context.Context) GceClusterConfigPrivateIpv6GoogleAccessPtrOutput

func (GceClusterConfigPrivateIpv6GoogleAccess) ToStringOutput added in v0.4.0

func (GceClusterConfigPrivateIpv6GoogleAccess) ToStringOutputWithContext added in v0.4.0

func (GceClusterConfigPrivateIpv6GoogleAccess) ToStringPtrOutput added in v0.4.0

func (GceClusterConfigPrivateIpv6GoogleAccess) ToStringPtrOutputWithContext added in v0.4.0

type GceClusterConfigPrivateIpv6GoogleAccessInput added in v0.6.0

type GceClusterConfigPrivateIpv6GoogleAccessInput interface {
	pulumi.Input

	ToGceClusterConfigPrivateIpv6GoogleAccessOutput() GceClusterConfigPrivateIpv6GoogleAccessOutput
	ToGceClusterConfigPrivateIpv6GoogleAccessOutputWithContext(context.Context) GceClusterConfigPrivateIpv6GoogleAccessOutput
}

GceClusterConfigPrivateIpv6GoogleAccessInput is an input type that accepts GceClusterConfigPrivateIpv6GoogleAccessArgs and GceClusterConfigPrivateIpv6GoogleAccessOutput values. You can construct a concrete instance of `GceClusterConfigPrivateIpv6GoogleAccessInput` via:

GceClusterConfigPrivateIpv6GoogleAccessArgs{...}

type GceClusterConfigPrivateIpv6GoogleAccessOutput added in v0.6.0

type GceClusterConfigPrivateIpv6GoogleAccessOutput struct{ *pulumi.OutputState }

func (GceClusterConfigPrivateIpv6GoogleAccessOutput) ElementType added in v0.6.0

func (GceClusterConfigPrivateIpv6GoogleAccessOutput) ToGceClusterConfigPrivateIpv6GoogleAccessOutput added in v0.6.0

func (o GceClusterConfigPrivateIpv6GoogleAccessOutput) ToGceClusterConfigPrivateIpv6GoogleAccessOutput() GceClusterConfigPrivateIpv6GoogleAccessOutput

func (GceClusterConfigPrivateIpv6GoogleAccessOutput) ToGceClusterConfigPrivateIpv6GoogleAccessOutputWithContext added in v0.6.0

func (o GceClusterConfigPrivateIpv6GoogleAccessOutput) ToGceClusterConfigPrivateIpv6GoogleAccessOutputWithContext(ctx context.Context) GceClusterConfigPrivateIpv6GoogleAccessOutput

func (GceClusterConfigPrivateIpv6GoogleAccessOutput) ToGceClusterConfigPrivateIpv6GoogleAccessPtrOutput added in v0.6.0

func (o GceClusterConfigPrivateIpv6GoogleAccessOutput) ToGceClusterConfigPrivateIpv6GoogleAccessPtrOutput() GceClusterConfigPrivateIpv6GoogleAccessPtrOutput

func (GceClusterConfigPrivateIpv6GoogleAccessOutput) ToGceClusterConfigPrivateIpv6GoogleAccessPtrOutputWithContext added in v0.6.0

func (o GceClusterConfigPrivateIpv6GoogleAccessOutput) ToGceClusterConfigPrivateIpv6GoogleAccessPtrOutputWithContext(ctx context.Context) GceClusterConfigPrivateIpv6GoogleAccessPtrOutput

func (GceClusterConfigPrivateIpv6GoogleAccessOutput) ToStringOutput added in v0.6.0

func (GceClusterConfigPrivateIpv6GoogleAccessOutput) ToStringOutputWithContext added in v0.6.0

func (GceClusterConfigPrivateIpv6GoogleAccessOutput) ToStringPtrOutput added in v0.6.0

func (GceClusterConfigPrivateIpv6GoogleAccessOutput) ToStringPtrOutputWithContext added in v0.6.0

type GceClusterConfigPrivateIpv6GoogleAccessPtrInput added in v0.6.0

type GceClusterConfigPrivateIpv6GoogleAccessPtrInput interface {
	pulumi.Input

	ToGceClusterConfigPrivateIpv6GoogleAccessPtrOutput() GceClusterConfigPrivateIpv6GoogleAccessPtrOutput
	ToGceClusterConfigPrivateIpv6GoogleAccessPtrOutputWithContext(context.Context) GceClusterConfigPrivateIpv6GoogleAccessPtrOutput
}

func GceClusterConfigPrivateIpv6GoogleAccessPtr added in v0.6.0

func GceClusterConfigPrivateIpv6GoogleAccessPtr(v string) GceClusterConfigPrivateIpv6GoogleAccessPtrInput

type GceClusterConfigPrivateIpv6GoogleAccessPtrOutput added in v0.6.0

type GceClusterConfigPrivateIpv6GoogleAccessPtrOutput struct{ *pulumi.OutputState }

func (GceClusterConfigPrivateIpv6GoogleAccessPtrOutput) Elem added in v0.6.0

func (GceClusterConfigPrivateIpv6GoogleAccessPtrOutput) ElementType added in v0.6.0

func (GceClusterConfigPrivateIpv6GoogleAccessPtrOutput) ToGceClusterConfigPrivateIpv6GoogleAccessPtrOutput added in v0.6.0

func (o GceClusterConfigPrivateIpv6GoogleAccessPtrOutput) ToGceClusterConfigPrivateIpv6GoogleAccessPtrOutput() GceClusterConfigPrivateIpv6GoogleAccessPtrOutput

func (GceClusterConfigPrivateIpv6GoogleAccessPtrOutput) ToGceClusterConfigPrivateIpv6GoogleAccessPtrOutputWithContext added in v0.6.0

func (o GceClusterConfigPrivateIpv6GoogleAccessPtrOutput) ToGceClusterConfigPrivateIpv6GoogleAccessPtrOutputWithContext(ctx context.Context) GceClusterConfigPrivateIpv6GoogleAccessPtrOutput

func (GceClusterConfigPrivateIpv6GoogleAccessPtrOutput) ToStringPtrOutput added in v0.6.0

func (GceClusterConfigPrivateIpv6GoogleAccessPtrOutput) ToStringPtrOutputWithContext added in v0.6.0

type GceClusterConfigPtrInput

type GceClusterConfigPtrInput interface {
	pulumi.Input

	ToGceClusterConfigPtrOutput() GceClusterConfigPtrOutput
	ToGceClusterConfigPtrOutputWithContext(context.Context) GceClusterConfigPtrOutput
}

GceClusterConfigPtrInput is an input type that accepts GceClusterConfigArgs, GceClusterConfigPtr and GceClusterConfigPtrOutput values. You can construct a concrete instance of `GceClusterConfigPtrInput` via:

        GceClusterConfigArgs{...}

or:

        nil

type GceClusterConfigPtrOutput

type GceClusterConfigPtrOutput struct{ *pulumi.OutputState }

func (GceClusterConfigPtrOutput) ConfidentialInstanceConfig

func (o GceClusterConfigPtrOutput) ConfidentialInstanceConfig() ConfidentialInstanceConfigPtrOutput

Optional. Confidential Instance Config for clusters using Confidential VMs (https://cloud.google.com/compute/confidential-vm/docs).

func (GceClusterConfigPtrOutput) Elem

func (GceClusterConfigPtrOutput) ElementType

func (GceClusterConfigPtrOutput) ElementType() reflect.Type

func (GceClusterConfigPtrOutput) InternalIpOnly

func (o GceClusterConfigPtrOutput) InternalIpOnly() pulumi.BoolPtrOutput

Optional. If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This internal_ip_only restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses.

func (GceClusterConfigPtrOutput) Metadata

Optional. The Compute Engine metadata entries to add to all instances (see Project and instance metadata (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).

func (GceClusterConfigPtrOutput) NetworkUri

Optional. The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither network_uri nor subnetwork_uri is specified, the "default" network of the project is used, if it exists. Cannot be a "Custom Subnet Network" (see Using Subnetworks (https://cloud.google.com/compute/docs/subnetworks) for more information). A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/global/networks/default projects/[project_id]/global/networks/default default

func (GceClusterConfigPtrOutput) NodeGroupAffinity

Optional. Node Group Affinity for sole-tenant clusters.

func (GceClusterConfigPtrOutput) PrivateIpv6GoogleAccess

Optional. The type of IPv6 access for a cluster.

func (GceClusterConfigPtrOutput) ReservationAffinity

Optional. Reservation Affinity for consuming Zonal reservation.

func (GceClusterConfigPtrOutput) ServiceAccount

Optional. The Dataproc service account (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc) (also see VM Data Plane identity (https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) used by Dataproc cluster VM instances to access Google Cloud Platform services. If not specified, the Compute Engine default service account (https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used.

func (GceClusterConfigPtrOutput) ServiceAccountScopes

func (o GceClusterConfigPtrOutput) ServiceAccountScopes() pulumi.StringArrayOutput

Optional. The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is always included: https://www.googleapis.com/auth/cloud.useraccounts.readonly https://www.googleapis.com/auth/devstorage.read_write https://www.googleapis.com/auth/logging.write. If no scopes are specified, the following defaults are also provided: https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/bigtable.admin.table https://www.googleapis.com/auth/bigtable.data https://www.googleapis.com/auth/devstorage.full_control

func (GceClusterConfigPtrOutput) ShieldedInstanceConfig

Optional. Shielded Instance Config for clusters using Compute Engine Shielded VMs (https://cloud.google.com/security/shielded-cloud/shielded-vm).

func (GceClusterConfigPtrOutput) SubnetworkUri

Optional. The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/regions/[region]/subnetworks/sub0 projects/[project_id]/regions/[region]/subnetworks/sub0 sub0

func (GceClusterConfigPtrOutput) Tags

The Compute Engine tags to add to all instances (see Tagging instances (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)).

func (GceClusterConfigPtrOutput) ToGceClusterConfigPtrOutput

func (o GceClusterConfigPtrOutput) ToGceClusterConfigPtrOutput() GceClusterConfigPtrOutput

func (GceClusterConfigPtrOutput) ToGceClusterConfigPtrOutputWithContext

func (o GceClusterConfigPtrOutput) ToGceClusterConfigPtrOutputWithContext(ctx context.Context) GceClusterConfigPtrOutput

func (GceClusterConfigPtrOutput) ZoneUri

Optional. The Compute Engine zone where the Dataproc cluster will be located. If omitted, the service will pick a zone in the cluster's Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone] projects/[project_id]/zones/[zone] [zone]

type GceClusterConfigResponse

type GceClusterConfigResponse struct {
	// Optional. Confidential Instance Config for clusters using Confidential VMs (https://cloud.google.com/compute/confidential-vm/docs).
	ConfidentialInstanceConfig ConfidentialInstanceConfigResponse `pulumi:"confidentialInstanceConfig"`
	// Optional. If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This internal_ip_only restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses.
	InternalIpOnly bool `pulumi:"internalIpOnly"`
	// Optional. The Compute Engine metadata entries to add to all instances (see Project and instance metadata (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
	Metadata map[string]string `pulumi:"metadata"`
	// Optional. The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither network_uri nor subnetwork_uri is specified, the "default" network of the project is used, if it exists. Cannot be a "Custom Subnet Network" (see Using Subnetworks (https://cloud.google.com/compute/docs/subnetworks) for more information). A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/global/networks/default projects/[project_id]/global/networks/default default
	NetworkUri string `pulumi:"networkUri"`
	// Optional. Node Group Affinity for sole-tenant clusters.
	NodeGroupAffinity NodeGroupAffinityResponse `pulumi:"nodeGroupAffinity"`
	// Optional. The type of IPv6 access for a cluster.
	PrivateIpv6GoogleAccess string `pulumi:"privateIpv6GoogleAccess"`
	// Optional. Reservation Affinity for consuming Zonal reservation.
	ReservationAffinity ReservationAffinityResponse `pulumi:"reservationAffinity"`
	// Optional. The Dataproc service account (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc) (also see VM Data Plane identity (https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) used by Dataproc cluster VM instances to access Google Cloud Platform services. If not specified, the Compute Engine default service account (https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used.
	ServiceAccount string `pulumi:"serviceAccount"`
	// Optional. The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is always included: https://www.googleapis.com/auth/cloud.useraccounts.readonly https://www.googleapis.com/auth/devstorage.read_write https://www.googleapis.com/auth/logging.write. If no scopes are specified, the following defaults are also provided: https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/bigtable.admin.table https://www.googleapis.com/auth/bigtable.data https://www.googleapis.com/auth/devstorage.full_control
	ServiceAccountScopes []string `pulumi:"serviceAccountScopes"`
	// Optional. Shielded Instance Config for clusters using Compute Engine Shielded VMs (https://cloud.google.com/security/shielded-cloud/shielded-vm).
	ShieldedInstanceConfig ShieldedInstanceConfigResponse `pulumi:"shieldedInstanceConfig"`
	// Optional. The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/regions/[region]/subnetworks/sub0 projects/[project_id]/regions/[region]/subnetworks/sub0 sub0
	SubnetworkUri string `pulumi:"subnetworkUri"`
	// The Compute Engine tags to add to all instances (see Tagging instances (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)).
	Tags []string `pulumi:"tags"`
	// Optional. The Compute Engine zone where the Dataproc cluster will be located. If omitted, the service will pick a zone in the cluster's Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone] projects/[project_id]/zones/[zone] [zone]
	ZoneUri string `pulumi:"zoneUri"`
}

Common config settings for resources of Compute Engine cluster instances, applicable to all instances in the cluster.
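The response variant is read-only. A small sketch of consuming it follows; how the GceClusterConfigResponseOutput is obtained (typically from a created cluster's outputs) and the ctx parameter are assumptions, while the accessor names match the methods documented below.

// exportClusterNetworkInfo exports a couple of resolved response fields.
func exportClusterNetworkInfo(ctx *pulumi.Context, cfg dataproc.GceClusterConfigResponseOutput) {
	// Each accessor returns a further Pulumi output that resolves after deployment.
	ctx.Export("dataprocZone", cfg.ZoneUri())
	ctx.Export("dataprocServiceAccountScopes", cfg.ServiceAccountScopes())
}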

type GceClusterConfigResponseOutput

type GceClusterConfigResponseOutput struct{ *pulumi.OutputState }

Common config settings for resources of Compute Engine cluster instances, applicable to all instances in the cluster.

func (GceClusterConfigResponseOutput) ConfidentialInstanceConfig

Optional. Confidential Instance Config for clusters using Confidential VMs (https://cloud.google.com/compute/confidential-vm/docs).

func (GceClusterConfigResponseOutput) ElementType

func (GceClusterConfigResponseOutput) InternalIpOnly

Optional. If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This internal_ip_only restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses.

func (GceClusterConfigResponseOutput) Metadata

Optional. The Compute Engine metadata entries to add to all instances (see Project and instance metadata (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).

func (GceClusterConfigResponseOutput) NetworkUri

Optional. The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither network_uri nor subnetwork_uri is specified, the "default" network of the project is used, if it exists. Cannot be a "Custom Subnet Network" (see Using Subnetworks (https://cloud.google.com/compute/docs/subnetworks) for more information). A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/global/networks/default projects/[project_id]/global/networks/default default

func (GceClusterConfigResponseOutput) NodeGroupAffinity

Optional. Node Group Affinity for sole-tenant clusters.

func (GceClusterConfigResponseOutput) PrivateIpv6GoogleAccess

func (o GceClusterConfigResponseOutput) PrivateIpv6GoogleAccess() pulumi.StringOutput

Optional. The type of IPv6 access for a cluster.

func (GceClusterConfigResponseOutput) ReservationAffinity

Optional. Reservation Affinity for consuming Zonal reservation.

func (GceClusterConfigResponseOutput) ServiceAccount

Optional. The Dataproc service account (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc) (also see VM Data Plane identity (https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) used by Dataproc cluster VM instances to access Google Cloud Platform services. If not specified, the Compute Engine default service account (https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used.

func (GceClusterConfigResponseOutput) ServiceAccountScopes

func (o GceClusterConfigResponseOutput) ServiceAccountScopes() pulumi.StringArrayOutput

Optional. The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is always included: https://www.googleapis.com/auth/cloud.useraccounts.readonly https://www.googleapis.com/auth/devstorage.read_write https://www.googleapis.com/auth/logging.write. If no scopes are specified, the following defaults are also provided: https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/bigtable.admin.table https://www.googleapis.com/auth/bigtable.data https://www.googleapis.com/auth/devstorage.full_control

func (GceClusterConfigResponseOutput) ShieldedInstanceConfig

Optional. Shielded Instance Config for clusters using Compute Engine Shielded VMs (https://cloud.google.com/security/shielded-cloud/shielded-vm).

func (GceClusterConfigResponseOutput) SubnetworkUri

Optional. The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/regions/[region]/subnetworks/sub0 projects/[project_id]/regions/[region]/subnetworks/sub0 sub0

func (GceClusterConfigResponseOutput) Tags

The Compute Engine tags to add to all instances (see Tagging instances (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)).

func (GceClusterConfigResponseOutput) ToGceClusterConfigResponseOutput

func (o GceClusterConfigResponseOutput) ToGceClusterConfigResponseOutput() GceClusterConfigResponseOutput

func (GceClusterConfigResponseOutput) ToGceClusterConfigResponseOutputWithContext

func (o GceClusterConfigResponseOutput) ToGceClusterConfigResponseOutputWithContext(ctx context.Context) GceClusterConfigResponseOutput

func (GceClusterConfigResponseOutput) ZoneUri

Optional. The Compute Engine zone where the Dataproc cluster will be located. If omitted, the service will pick a zone in the cluster's Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone] projects/[project_id]/zones/[zone] [zone]

type GkeClusterConfig

type GkeClusterConfig struct {
	// Optional. A target GKE cluster to deploy to. It must be in the same project and region as the Dataproc cluster (the GKE cluster can be zonal or regional). Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'
	GkeClusterTarget *string `pulumi:"gkeClusterTarget"`
	// Optional. Deprecated. Use gkeClusterTarget. Used only for the deprecated beta. A target for the deployment.
	//
	// Deprecated: Optional. Deprecated. Use gkeClusterTarget. Used only for the deprecated beta. A target for the deployment.
	NamespacedGkeDeploymentTarget *NamespacedGkeDeploymentTarget `pulumi:"namespacedGkeDeploymentTarget"`
	// Optional. GKE node pools where workloads will be scheduled. At least one node pool must be assigned the DEFAULT GkeNodePoolTarget.Role. If a GkeNodePoolTarget is not specified, Dataproc constructs a DEFAULT GkeNodePoolTarget. Each role can be given to only one GkeNodePoolTarget. All node pools must have the same location settings.
	NodePoolTarget []GkeNodePoolTarget `pulumi:"nodePoolTarget"`
}

The cluster's GKE config.

type GkeClusterConfigArgs

type GkeClusterConfigArgs struct {
	// Optional. A target GKE cluster to deploy to. It must be in the same project and region as the Dataproc cluster (the GKE cluster can be zonal or regional). Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'
	GkeClusterTarget pulumi.StringPtrInput `pulumi:"gkeClusterTarget"`
	// Optional. Deprecated. Use gkeClusterTarget. Used only for the deprecated beta. A target for the deployment.
	//
	// Deprecated: Optional. Deprecated. Use gkeClusterTarget. Used only for the deprecated beta. A target for the deployment.
	NamespacedGkeDeploymentTarget NamespacedGkeDeploymentTargetPtrInput `pulumi:"namespacedGkeDeploymentTarget"`
	// Optional. GKE node pools where workloads will be scheduled. At least one node pool must be assigned the DEFAULT GkeNodePoolTarget.Role. If a GkeNodePoolTarget is not specified, Dataproc constructs a DEFAULT GkeNodePoolTarget. Each role can be given to only one GkeNodePoolTarget. All node pools must have the same location settings.
	NodePoolTarget GkeNodePoolTargetArrayInput `pulumi:"nodePoolTarget"`
}

The cluster's GKE config.
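A minimal sketch of a GkeClusterConfigArgs literal follows (same assumed import alias as above). The target cluster path is a placeholder; NodePoolTarget is omitted here, in which case Dataproc constructs a DEFAULT GkeNodePoolTarget as described in the field comment.

var gkeClusterConfig = dataproc.GkeClusterConfigArgs{
	// Must be in the same project and region as the Dataproc cluster.
	GkeClusterTarget: pulumi.String("projects/my-project/locations/us-central1/clusters/my-gke-cluster"),
}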

func (GkeClusterConfigArgs) ElementType

func (GkeClusterConfigArgs) ElementType() reflect.Type

func (GkeClusterConfigArgs) ToGkeClusterConfigOutput

func (i GkeClusterConfigArgs) ToGkeClusterConfigOutput() GkeClusterConfigOutput

func (GkeClusterConfigArgs) ToGkeClusterConfigOutputWithContext

func (i GkeClusterConfigArgs) ToGkeClusterConfigOutputWithContext(ctx context.Context) GkeClusterConfigOutput

func (GkeClusterConfigArgs) ToGkeClusterConfigPtrOutput

func (i GkeClusterConfigArgs) ToGkeClusterConfigPtrOutput() GkeClusterConfigPtrOutput

func (GkeClusterConfigArgs) ToGkeClusterConfigPtrOutputWithContext

func (i GkeClusterConfigArgs) ToGkeClusterConfigPtrOutputWithContext(ctx context.Context) GkeClusterConfigPtrOutput

type GkeClusterConfigInput

type GkeClusterConfigInput interface {
	pulumi.Input

	ToGkeClusterConfigOutput() GkeClusterConfigOutput
	ToGkeClusterConfigOutputWithContext(context.Context) GkeClusterConfigOutput
}

GkeClusterConfigInput is an input type that accepts GkeClusterConfigArgs and GkeClusterConfigOutput values. You can construct a concrete instance of `GkeClusterConfigInput` via:

GkeClusterConfigArgs{...}

type GkeClusterConfigOutput

type GkeClusterConfigOutput struct{ *pulumi.OutputState }

The cluster's GKE config.

func (GkeClusterConfigOutput) ElementType

func (GkeClusterConfigOutput) ElementType() reflect.Type

func (GkeClusterConfigOutput) GkeClusterTarget added in v0.18.2

func (o GkeClusterConfigOutput) GkeClusterTarget() pulumi.StringPtrOutput

Optional. A target GKE cluster to deploy to. It must be in the same project and region as the Dataproc cluster (the GKE cluster can be zonal or regional). Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'

func (GkeClusterConfigOutput) NamespacedGkeDeploymentTarget deprecated

func (o GkeClusterConfigOutput) NamespacedGkeDeploymentTarget() NamespacedGkeDeploymentTargetPtrOutput

Optional. Deprecated. Use gkeClusterTarget. Used only for the deprecated beta. A target for the deployment.

Deprecated: Optional. Deprecated. Use gkeClusterTarget. Used only for the deprecated beta. A target for the deployment.

func (GkeClusterConfigOutput) NodePoolTarget added in v0.18.2

Optional. GKE node pools where workloads will be scheduled. At least one node pool must be assigned the DEFAULT GkeNodePoolTarget.Role. If a GkeNodePoolTarget is not specified, Dataproc constructs a DEFAULT GkeNodePoolTarget. Each role can be given to only one GkeNodePoolTarget. All node pools must have the same location settings.

func (GkeClusterConfigOutput) ToGkeClusterConfigOutput

func (o GkeClusterConfigOutput) ToGkeClusterConfigOutput() GkeClusterConfigOutput

func (GkeClusterConfigOutput) ToGkeClusterConfigOutputWithContext

func (o GkeClusterConfigOutput) ToGkeClusterConfigOutputWithContext(ctx context.Context) GkeClusterConfigOutput

func (GkeClusterConfigOutput) ToGkeClusterConfigPtrOutput

func (o GkeClusterConfigOutput) ToGkeClusterConfigPtrOutput() GkeClusterConfigPtrOutput

func (GkeClusterConfigOutput) ToGkeClusterConfigPtrOutputWithContext

func (o GkeClusterConfigOutput) ToGkeClusterConfigPtrOutputWithContext(ctx context.Context) GkeClusterConfigPtrOutput

type GkeClusterConfigPtrInput

type GkeClusterConfigPtrInput interface {
	pulumi.Input

	ToGkeClusterConfigPtrOutput() GkeClusterConfigPtrOutput
	ToGkeClusterConfigPtrOutputWithContext(context.Context) GkeClusterConfigPtrOutput
}

GkeClusterConfigPtrInput is an input type that accepts GkeClusterConfigArgs, GkeClusterConfigPtr and GkeClusterConfigPtrOutput values. You can construct a concrete instance of `GkeClusterConfigPtrInput` via:

        GkeClusterConfigArgs{...}

or:

        nil

type GkeClusterConfigPtrOutput

type GkeClusterConfigPtrOutput struct{ *pulumi.OutputState }

func (GkeClusterConfigPtrOutput) Elem

func (GkeClusterConfigPtrOutput) ElementType

func (GkeClusterConfigPtrOutput) ElementType() reflect.Type

func (GkeClusterConfigPtrOutput) GkeClusterTarget added in v0.18.2

func (o GkeClusterConfigPtrOutput) GkeClusterTarget() pulumi.StringPtrOutput

Optional. A target GKE cluster to deploy to. It must be in the same project and region as the Dataproc cluster (the GKE cluster can be zonal or regional). Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'

func (GkeClusterConfigPtrOutput) NamespacedGkeDeploymentTarget deprecated

func (o GkeClusterConfigPtrOutput) NamespacedGkeDeploymentTarget() NamespacedGkeDeploymentTargetPtrOutput

Optional. Deprecated. Use gkeClusterTarget. Used only for the deprecated beta. A target for the deployment.

Deprecated: Optional. Deprecated. Use gkeClusterTarget. Used only for the deprecated beta. A target for the deployment.

func (GkeClusterConfigPtrOutput) NodePoolTarget added in v0.18.2

Optional. GKE node pools where workloads will be scheduled. At least one node pool must be assigned the DEFAULT GkeNodePoolTarget.Role. If a GkeNodePoolTarget is not specified, Dataproc constructs a DEFAULT GkeNodePoolTarget. Each role can be given to only one GkeNodePoolTarget. All node pools must have the same location settings.

func (GkeClusterConfigPtrOutput) ToGkeClusterConfigPtrOutput

func (o GkeClusterConfigPtrOutput) ToGkeClusterConfigPtrOutput() GkeClusterConfigPtrOutput

func (GkeClusterConfigPtrOutput) ToGkeClusterConfigPtrOutputWithContext

func (o GkeClusterConfigPtrOutput) ToGkeClusterConfigPtrOutputWithContext(ctx context.Context) GkeClusterConfigPtrOutput

type GkeClusterConfigResponse

type GkeClusterConfigResponse struct {
	// Optional. A target GKE cluster to deploy to. It must be in the same project and region as the Dataproc cluster (the GKE cluster can be zonal or regional). Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'
	GkeClusterTarget string `pulumi:"gkeClusterTarget"`
	// Optional. Deprecated. Use gkeClusterTarget. Used only for the deprecated beta. A target for the deployment.
	//
	// Deprecated: Optional. Deprecated. Use gkeClusterTarget. Used only for the deprecated beta. A target for the deployment.
	NamespacedGkeDeploymentTarget NamespacedGkeDeploymentTargetResponse `pulumi:"namespacedGkeDeploymentTarget"`
	// Optional. GKE node pools where workloads will be scheduled. At least one node pool must be assigned the DEFAULT GkeNodePoolTarget.Role. If a GkeNodePoolTarget is not specified, Dataproc constructs a DEFAULT GkeNodePoolTarget. Each role can be given to only one GkeNodePoolTarget. All node pools must have the same location settings.
	NodePoolTarget []GkeNodePoolTargetResponse `pulumi:"nodePoolTarget"`
}

The cluster's GKE config.

type GkeClusterConfigResponseOutput

type GkeClusterConfigResponseOutput struct{ *pulumi.OutputState }

The cluster's GKE config.

func (GkeClusterConfigResponseOutput) ElementType

func (GkeClusterConfigResponseOutput) GkeClusterTarget added in v0.18.2

func (o GkeClusterConfigResponseOutput) GkeClusterTarget() pulumi.StringOutput

Optional. A target GKE cluster to deploy to. It must be in the same project and region as the Dataproc cluster (the GKE cluster can be zonal or regional). Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'

func (GkeClusterConfigResponseOutput) NamespacedGkeDeploymentTarget deprecated

Optional. Deprecated. Use gkeClusterTarget. Used only for the deprecated beta. A target for the deployment.

Deprecated: Optional. Deprecated. Use gkeClusterTarget. Used only for the deprecated beta. A target for the deployment.

func (GkeClusterConfigResponseOutput) NodePoolTarget added in v0.18.2

Optional. GKE node pools where workloads will be scheduled. At least one node pool must be assigned the DEFAULT GkeNodePoolTarget.Role. If a GkeNodePoolTarget is not specified, Dataproc constructs a DEFAULT GkeNodePoolTarget. Each role can be given to only one GkeNodePoolTarget. All node pools must have the same location settings.

func (GkeClusterConfigResponseOutput) ToGkeClusterConfigResponseOutput

func (o GkeClusterConfigResponseOutput) ToGkeClusterConfigResponseOutput() GkeClusterConfigResponseOutput

func (GkeClusterConfigResponseOutput) ToGkeClusterConfigResponseOutputWithContext

func (o GkeClusterConfigResponseOutput) ToGkeClusterConfigResponseOutputWithContext(ctx context.Context) GkeClusterConfigResponseOutput

type GkeNodeConfig added in v0.18.2

type GkeNodeConfig struct {
	// Optional. A list of hardware accelerators (https://cloud.google.com/compute/docs/gpus) to attach to each node.
	Accelerators []GkeNodePoolAcceleratorConfig `pulumi:"accelerators"`
	// Optional. The Customer Managed Encryption Key (CMEK) (https://cloud.google.com/kubernetes-engine/docs/how-to/using-cmek) used to encrypt the boot disk attached to each node in the node pool. Specify the key using the following format: projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}
	BootDiskKmsKey *string `pulumi:"bootDiskKmsKey"`
	// Optional. The number of local SSD disks to attach to the node, which is limited by the maximum number of disks allowable per zone (see Adding Local SSDs (https://cloud.google.com/compute/docs/disks/local-ssd)).
	LocalSsdCount *int `pulumi:"localSsdCount"`
	// Optional. The name of a Compute Engine machine type (https://cloud.google.com/compute/docs/machine-types).
	MachineType *string `pulumi:"machineType"`
	// Optional. Minimum CPU platform (https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform) to be used by this instance. The instance may be scheduled on the specified or a newer CPU platform. Specify the friendly names of CPU platforms, such as "Intel Haswell" or "Intel Sandy Bridge".
	MinCpuPlatform *string `pulumi:"minCpuPlatform"`
	// Optional. Whether the nodes are created as legacy preemptible VM instances (https://cloud.google.com/compute/docs/instances/preemptible). Also see Spot VMs, preemptible VM instances without a maximum lifetime. Legacy and Spot preemptible nodes cannot be used in a node pool with the CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role).
	Preemptible *bool `pulumi:"preemptible"`
	// Optional. Whether the nodes are created as Spot VM instances (https://cloud.google.com/compute/docs/instances/spot). Spot VMs are the latest update to legacy preemptible VMs. Spot VMs do not have a maximum lifetime. Legacy and Spot preemptible nodes cannot be used in a node pool with the CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role).
	Spot *bool `pulumi:"spot"`
}

Parameters that describe cluster nodes.

type GkeNodeConfigArgs added in v0.18.2

type GkeNodeConfigArgs struct {
	// Optional. A list of hardware accelerators (https://cloud.google.com/compute/docs/gpus) to attach to each node.
	Accelerators GkeNodePoolAcceleratorConfigArrayInput `pulumi:"accelerators"`
	// Optional. The Customer Managed Encryption Key (CMEK) (https://cloud.google.com/kubernetes-engine/docs/how-to/using-cmek) used to encrypt the boot disk attached to each node in the node pool. Specify the key using the following format: projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}
	BootDiskKmsKey pulumi.StringPtrInput `pulumi:"bootDiskKmsKey"`
	// Optional. The number of local SSD disks to attach to the node, which is limited by the maximum number of disks allowable per zone (see Adding Local SSDs (https://cloud.google.com/compute/docs/disks/local-ssd)).
	LocalSsdCount pulumi.IntPtrInput `pulumi:"localSsdCount"`
	// Optional. The name of a Compute Engine machine type (https://cloud.google.com/compute/docs/machine-types).
	MachineType pulumi.StringPtrInput `pulumi:"machineType"`
	// Optional. Minimum CPU platform (https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform) to be used by this instance. The instance may be scheduled on the specified or a newer CPU platform. Specify the friendly names of CPU platforms, such as "Intel Haswell" or "Intel Sandy Bridge".
	MinCpuPlatform pulumi.StringPtrInput `pulumi:"minCpuPlatform"`
	// Optional. Whether the nodes are created as legacy preemptible VM instances (https://cloud.google.com/compute/docs/instances/preemptible). Also see Spot VMs, preemptible VM instances without a maximum lifetime. Legacy and Spot preemptible nodes cannot be used in a node pool with the CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role).
	Preemptible pulumi.BoolPtrInput `pulumi:"preemptible"`
	// Optional. Whether the nodes are created as Spot VM instances (https://cloud.google.com/compute/docs/instances/spot). Spot VMs are the latest update to legacy preemptible VMs. Spot VMs do not have a maximum lifetime. Legacy and Spot preemptible nodes cannot be used in a node pool with the CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role).
	Spot pulumi.BoolPtrInput `pulumi:"spot"`
}

Parameters that describe cluster nodes.
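A sketch of a GkeNodeConfigArgs literal follows (assumed import alias as above); the machine type, SSD count, and CPU platform values are illustrative placeholders, not defaults of this package.

var gkeNodeConfig = dataproc.GkeNodeConfigArgs{
	MachineType:    pulumi.String("n1-standard-8"),
	LocalSsdCount:  pulumi.Int(1),
	MinCpuPlatform: pulumi.String("Intel Haswell"),
	// Spot nodes cannot back the CONTROLLER role, per the field comment above.
	Spot: pulumi.Bool(true),
}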

func (GkeNodeConfigArgs) ElementType added in v0.18.2

func (GkeNodeConfigArgs) ElementType() reflect.Type

func (GkeNodeConfigArgs) ToGkeNodeConfigOutput added in v0.18.2

func (i GkeNodeConfigArgs) ToGkeNodeConfigOutput() GkeNodeConfigOutput

func (GkeNodeConfigArgs) ToGkeNodeConfigOutputWithContext added in v0.18.2

func (i GkeNodeConfigArgs) ToGkeNodeConfigOutputWithContext(ctx context.Context) GkeNodeConfigOutput

func (GkeNodeConfigArgs) ToGkeNodeConfigPtrOutput added in v0.18.2

func (i GkeNodeConfigArgs) ToGkeNodeConfigPtrOutput() GkeNodeConfigPtrOutput

func (GkeNodeConfigArgs) ToGkeNodeConfigPtrOutputWithContext added in v0.18.2

func (i GkeNodeConfigArgs) ToGkeNodeConfigPtrOutputWithContext(ctx context.Context) GkeNodeConfigPtrOutput

type GkeNodeConfigInput added in v0.18.2

type GkeNodeConfigInput interface {
	pulumi.Input

	ToGkeNodeConfigOutput() GkeNodeConfigOutput
	ToGkeNodeConfigOutputWithContext(context.Context) GkeNodeConfigOutput
}

GkeNodeConfigInput is an input type that accepts GkeNodeConfigArgs and GkeNodeConfigOutput values. You can construct a concrete instance of `GkeNodeConfigInput` via:

GkeNodeConfigArgs{...}

type GkeNodeConfigOutput added in v0.18.2

type GkeNodeConfigOutput struct{ *pulumi.OutputState }

Parameters that describe cluster nodes.

func (GkeNodeConfigOutput) Accelerators added in v0.18.2

Optional. A list of hardware accelerators (https://cloud.google.com/compute/docs/gpus) to attach to each node.

func (GkeNodeConfigOutput) BootDiskKmsKey added in v0.21.0

func (o GkeNodeConfigOutput) BootDiskKmsKey() pulumi.StringPtrOutput

Optional. The Customer Managed Encryption Key (CMEK) (https://cloud.google.com/kubernetes-engine/docs/how-to/using-cmek) used to encrypt the boot disk attached to each node in the node pool. Specify the key using the following format: projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}

func (GkeNodeConfigOutput) ElementType added in v0.18.2

func (GkeNodeConfigOutput) ElementType() reflect.Type

func (GkeNodeConfigOutput) LocalSsdCount added in v0.18.2

func (o GkeNodeConfigOutput) LocalSsdCount() pulumi.IntPtrOutput

Optional. The number of local SSD disks to attach to the node, which is limited by the maximum number of disks allowable per zone (see Adding Local SSDs (https://cloud.google.com/compute/docs/disks/local-ssd)).

func (GkeNodeConfigOutput) MachineType added in v0.18.2

func (o GkeNodeConfigOutput) MachineType() pulumi.StringPtrOutput

Optional. The name of a Compute Engine machine type (https://cloud.google.com/compute/docs/machine-types).

func (GkeNodeConfigOutput) MinCpuPlatform added in v0.18.2

func (o GkeNodeConfigOutput) MinCpuPlatform() pulumi.StringPtrOutput

Optional. Minimum CPU platform (https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform) to be used by this instance. The instance may be scheduled on the specified or a newer CPU platform. Specify the friendly names of CPU platforms, such as "Intel Haswell" or "Intel Sandy Bridge".

func (GkeNodeConfigOutput) Preemptible added in v0.18.2

func (o GkeNodeConfigOutput) Preemptible() pulumi.BoolPtrOutput

Optional. Whether the nodes are created as legacy preemptible VM instances (https://cloud.google.com/compute/docs/instances/preemptible). Also see Spot VMs, preemptible VM instances without a maximum lifetime. Legacy and Spot preemptible nodes cannot be used in a node pool with the CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role).

func (GkeNodeConfigOutput) Spot added in v0.18.2

Optional. Whether the nodes are created as Spot VM instances (https://cloud.google.com/compute/docs/instances/spot). Spot VMs are the latest update to legacy preemptible VMs. Spot VMs do not have a maximum lifetime. Legacy and Spot preemptible nodes cannot be used in a node pool with the CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role).

func (GkeNodeConfigOutput) ToGkeNodeConfigOutput added in v0.18.2

func (o GkeNodeConfigOutput) ToGkeNodeConfigOutput() GkeNodeConfigOutput

func (GkeNodeConfigOutput) ToGkeNodeConfigOutputWithContext added in v0.18.2

func (o GkeNodeConfigOutput) ToGkeNodeConfigOutputWithContext(ctx context.Context) GkeNodeConfigOutput

func (GkeNodeConfigOutput) ToGkeNodeConfigPtrOutput added in v0.18.2

func (o GkeNodeConfigOutput) ToGkeNodeConfigPtrOutput() GkeNodeConfigPtrOutput

func (GkeNodeConfigOutput) ToGkeNodeConfigPtrOutputWithContext added in v0.18.2

func (o GkeNodeConfigOutput) ToGkeNodeConfigPtrOutputWithContext(ctx context.Context) GkeNodeConfigPtrOutput

type GkeNodeConfigPtrInput added in v0.18.2

type GkeNodeConfigPtrInput interface {
	pulumi.Input

	ToGkeNodeConfigPtrOutput() GkeNodeConfigPtrOutput
	ToGkeNodeConfigPtrOutputWithContext(context.Context) GkeNodeConfigPtrOutput
}

GkeNodeConfigPtrInput is an input type that accepts GkeNodeConfigArgs, GkeNodeConfigPtr and GkeNodeConfigPtrOutput values. You can construct a concrete instance of `GkeNodeConfigPtrInput` via:

        GkeNodeConfigArgs{...}

or:

        nil

func GkeNodeConfigPtr added in v0.18.2

func GkeNodeConfigPtr(v *GkeNodeConfigArgs) GkeNodeConfigPtrInput
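A short, assumed usage sketch (import alias as above): the helper wraps an existing *GkeNodeConfigArgs as a pointer input, which is convenient when one node config is reused in several places; passing an Args literal directly, or nil, also satisfies GkeNodeConfigPtrInput, as noted above.

var sharedNodeConfig = dataproc.GkeNodeConfigPtr(&dataproc.GkeNodeConfigArgs{
	MachineType: pulumi.String("n1-standard-4"),
})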

type GkeNodeConfigPtrOutput added in v0.18.2

type GkeNodeConfigPtrOutput struct{ *pulumi.OutputState }

func (GkeNodeConfigPtrOutput) Accelerators added in v0.18.2

Optional. A list of hardware accelerators (https://cloud.google.com/compute/docs/gpus) to attach to each node.

func (GkeNodeConfigPtrOutput) BootDiskKmsKey added in v0.21.0

func (o GkeNodeConfigPtrOutput) BootDiskKmsKey() pulumi.StringPtrOutput

Optional. The Customer Managed Encryption Key (CMEK) (https://cloud.google.com/kubernetes-engine/docs/how-to/using-cmek) used to encrypt the boot disk attached to each node in the node pool. Specify the key using the following format: projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}

func (GkeNodeConfigPtrOutput) Elem added in v0.18.2

func (GkeNodeConfigPtrOutput) ElementType added in v0.18.2

func (GkeNodeConfigPtrOutput) ElementType() reflect.Type

func (GkeNodeConfigPtrOutput) LocalSsdCount added in v0.18.2

func (o GkeNodeConfigPtrOutput) LocalSsdCount() pulumi.IntPtrOutput

Optional. The number of local SSD disks to attach to the node, which is limited by the maximum number of disks allowable per zone (see Adding Local SSDs (https://cloud.google.com/compute/docs/disks/local-ssd)).

func (GkeNodeConfigPtrOutput) MachineType added in v0.18.2

Optional. The name of a Compute Engine machine type (https://cloud.google.com/compute/docs/machine-types).

func (GkeNodeConfigPtrOutput) MinCpuPlatform added in v0.18.2

func (o GkeNodeConfigPtrOutput) MinCpuPlatform() pulumi.StringPtrOutput

Optional. Minimum CPU platform (https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform) to be used by this instance. The instance may be scheduled on the specified or a newer CPU platform. Specify the friendly names of CPU platforms, such as "Intel Haswell" or "Intel Sandy Bridge".

func (GkeNodeConfigPtrOutput) Preemptible added in v0.18.2

Optional. Whether the nodes are created as legacy preemptible VM instances (https://cloud.google.com/compute/docs/instances/preemptible). Also see Spot VMs, preemptible VM instances without a maximum lifetime. Legacy and Spot preemptible nodes cannot be used in a node pool with the CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role).

func (GkeNodeConfigPtrOutput) Spot added in v0.18.2

Optional. Whether the nodes are created as Spot VM instances (https://cloud.google.com/compute/docs/instances/spot). Spot VMs are the latest update to legacy preemptible VMs. Spot VMs do not have a maximum lifetime. Legacy and Spot preemptible nodes cannot be used in a node pool with the CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role).

func (GkeNodeConfigPtrOutput) ToGkeNodeConfigPtrOutput added in v0.18.2

func (o GkeNodeConfigPtrOutput) ToGkeNodeConfigPtrOutput() GkeNodeConfigPtrOutput

func (GkeNodeConfigPtrOutput) ToGkeNodeConfigPtrOutputWithContext added in v0.18.2

func (o GkeNodeConfigPtrOutput) ToGkeNodeConfigPtrOutputWithContext(ctx context.Context) GkeNodeConfigPtrOutput

type GkeNodeConfigResponse added in v0.18.2

type GkeNodeConfigResponse struct {
	// Optional. A list of hardware accelerators (https://cloud.google.com/compute/docs/gpus) to attach to each node.
	Accelerators []GkeNodePoolAcceleratorConfigResponse `pulumi:"accelerators"`
	// Optional. The Customer Managed Encryption Key (CMEK) (https://cloud.google.com/kubernetes-engine/docs/how-to/using-cmek) used to encrypt the boot disk attached to each node in the node pool. Specify the key using the following format: projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}
	BootDiskKmsKey string `pulumi:"bootDiskKmsKey"`
	// Optional. The number of local SSD disks to attach to the node, which is limited by the maximum number of disks allowable per zone (see Adding Local SSDs (https://cloud.google.com/compute/docs/disks/local-ssd)).
	LocalSsdCount int `pulumi:"localSsdCount"`
	// Optional. The name of a Compute Engine machine type (https://cloud.google.com/compute/docs/machine-types).
	MachineType string `pulumi:"machineType"`
	// Optional. Minimum CPU platform (https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform) to be used by this instance. The instance may be scheduled on the specified or a newer CPU platform. Specify the friendly names of CPU platforms, such as "Intel Haswell" or "Intel Sandy Bridge".
	MinCpuPlatform string `pulumi:"minCpuPlatform"`
	// Optional. Whether the nodes are created as legacy preemptible VM instances (https://cloud.google.com/compute/docs/instances/preemptible). Also see Spot VMs, preemptible VM instances without a maximum lifetime. Legacy and Spot preemptible nodes cannot be used in a node pool with the CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role).
	Preemptible bool `pulumi:"preemptible"`
	// Optional. Whether the nodes are created as Spot VM instances (https://cloud.google.com/compute/docs/instances/spot). Spot VMs are the latest update to legacy preemptible VMs. Spot VMs do not have a maximum lifetime. Legacy and Spot preemptible nodes cannot be used in a node pool with the CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role).
	Spot bool `pulumi:"spot"`
}

Parameters that describe cluster nodes.

type GkeNodeConfigResponseOutput added in v0.18.2

type GkeNodeConfigResponseOutput struct{ *pulumi.OutputState }

Parameters that describe cluster nodes.

func (GkeNodeConfigResponseOutput) Accelerators added in v0.18.2

Optional. A list of hardware accelerators (https://cloud.google.com/compute/docs/gpus) to attach to each node.

func (GkeNodeConfigResponseOutput) BootDiskKmsKey added in v0.21.0

func (o GkeNodeConfigResponseOutput) BootDiskKmsKey() pulumi.StringOutput

Optional. The Customer Managed Encryption Key (CMEK) (https://cloud.google.com/kubernetes-engine/docs/how-to/using-cmek) used to encrypt the boot disk attached to each node in the node pool. Specify the key using the following format: projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}

func (GkeNodeConfigResponseOutput) ElementType added in v0.18.2

func (GkeNodeConfigResponseOutput) LocalSsdCount added in v0.18.2

func (o GkeNodeConfigResponseOutput) LocalSsdCount() pulumi.IntOutput

Optional. The number of local SSD disks to attach to the node, which is limited by the maximum number of disks allowable per zone (see Adding Local SSDs (https://cloud.google.com/compute/docs/disks/local-ssd)).

func (GkeNodeConfigResponseOutput) MachineType added in v0.18.2

Optional. The name of a Compute Engine machine type (https://cloud.google.com/compute/docs/machine-types).

func (GkeNodeConfigResponseOutput) MinCpuPlatform added in v0.18.2

func (o GkeNodeConfigResponseOutput) MinCpuPlatform() pulumi.StringOutput

Optional. Minimum CPU platform (https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform) to be used by this instance. The instance may be scheduled on the specified or a newer CPU platform. Specify the friendly names of CPU platforms, such as "Intel Haswell" or "Intel Sandy Bridge".

func (GkeNodeConfigResponseOutput) Preemptible added in v0.18.2

Optional. Whether the nodes are created as legacy preemptible VM instances (https://cloud.google.com/compute/docs/instances/preemptible). Also see Spot VMs, preemptible VM instances without a maximum lifetime. Legacy and Spot preemptible nodes cannot be used in a node pool with the CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role).

func (GkeNodeConfigResponseOutput) Spot added in v0.18.2

Optional. Whether the nodes are created as Spot VM instances (https://cloud.google.com/compute/docs/instances/spot). Spot VMs are the latest update to legacy preemptible VMs. Spot VMs do not have a maximum lifetime. Legacy and Spot preemptible nodes cannot be used in a node pool with the CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role).

func (GkeNodeConfigResponseOutput) ToGkeNodeConfigResponseOutput added in v0.18.2

func (o GkeNodeConfigResponseOutput) ToGkeNodeConfigResponseOutput() GkeNodeConfigResponseOutput

func (GkeNodeConfigResponseOutput) ToGkeNodeConfigResponseOutputWithContext added in v0.18.2

func (o GkeNodeConfigResponseOutput) ToGkeNodeConfigResponseOutputWithContext(ctx context.Context) GkeNodeConfigResponseOutput

type GkeNodePoolAcceleratorConfig added in v0.18.2

type GkeNodePoolAcceleratorConfig struct {
	// The number of accelerator cards exposed to an instance.
	AcceleratorCount *string `pulumi:"acceleratorCount"`
	// The accelerator type resource name (see GPUs on Compute Engine).
	AcceleratorType *string `pulumi:"acceleratorType"`
	// Size of partitions to create on the GPU. Valid values are described in the NVIDIA mig user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning).
	GpuPartitionSize *string `pulumi:"gpuPartitionSize"`
}

A GkeNodePoolAcceleratorConfig represents a Hardware Accelerator request for a node pool.

type GkeNodePoolAcceleratorConfigArgs added in v0.18.2

type GkeNodePoolAcceleratorConfigArgs struct {
	// The number of accelerator cards exposed to an instance.
	AcceleratorCount pulumi.StringPtrInput `pulumi:"acceleratorCount"`
	// The accelerator type resource name (see GPUs on Compute Engine).
	AcceleratorType pulumi.StringPtrInput `pulumi:"acceleratorType"`
	// Size of partitions to create on the GPU. Valid values are described in the NVIDIA mig user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning).
	GpuPartitionSize pulumi.StringPtrInput `pulumi:"gpuPartitionSize"`
}

A GkeNodePoolAcceleratorConfig represents a Hardware Accelerator request for a node pool.

func (GkeNodePoolAcceleratorConfigArgs) ElementType added in v0.18.2

func (GkeNodePoolAcceleratorConfigArgs) ToGkeNodePoolAcceleratorConfigOutput added in v0.18.2

func (i GkeNodePoolAcceleratorConfigArgs) ToGkeNodePoolAcceleratorConfigOutput() GkeNodePoolAcceleratorConfigOutput

func (GkeNodePoolAcceleratorConfigArgs) ToGkeNodePoolAcceleratorConfigOutputWithContext added in v0.18.2

func (i GkeNodePoolAcceleratorConfigArgs) ToGkeNodePoolAcceleratorConfigOutputWithContext(ctx context.Context) GkeNodePoolAcceleratorConfigOutput

type GkeNodePoolAcceleratorConfigArray added in v0.18.2

type GkeNodePoolAcceleratorConfigArray []GkeNodePoolAcceleratorConfigInput

func (GkeNodePoolAcceleratorConfigArray) ElementType added in v0.18.2

func (GkeNodePoolAcceleratorConfigArray) ToGkeNodePoolAcceleratorConfigArrayOutput added in v0.18.2

func (i GkeNodePoolAcceleratorConfigArray) ToGkeNodePoolAcceleratorConfigArrayOutput() GkeNodePoolAcceleratorConfigArrayOutput

func (GkeNodePoolAcceleratorConfigArray) ToGkeNodePoolAcceleratorConfigArrayOutputWithContext added in v0.18.2

func (i GkeNodePoolAcceleratorConfigArray) ToGkeNodePoolAcceleratorConfigArrayOutputWithContext(ctx context.Context) GkeNodePoolAcceleratorConfigArrayOutput

type GkeNodePoolAcceleratorConfigArrayInput added in v0.18.2

type GkeNodePoolAcceleratorConfigArrayInput interface {
	pulumi.Input

	ToGkeNodePoolAcceleratorConfigArrayOutput() GkeNodePoolAcceleratorConfigArrayOutput
	ToGkeNodePoolAcceleratorConfigArrayOutputWithContext(context.Context) GkeNodePoolAcceleratorConfigArrayOutput
}

GkeNodePoolAcceleratorConfigArrayInput is an input type that accepts GkeNodePoolAcceleratorConfigArray and GkeNodePoolAcceleratorConfigArrayOutput values. You can construct a concrete instance of `GkeNodePoolAcceleratorConfigArrayInput` via:

GkeNodePoolAcceleratorConfigArray{ GkeNodePoolAcceleratorConfigArgs{...} }
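
For illustration, a minimal sketch of building such an array literal. The pulumi-google-native import path, the `dataproc` alias, the helper name, and the accelerator type value are assumptions, not taken from this documentation:

package example

import (
	// Assumed import paths for this package and for the core Pulumi SDK.
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// exampleAccelerators builds a GkeNodePoolAcceleratorConfigArrayInput with one entry.
func exampleAccelerators() dataproc.GkeNodePoolAcceleratorConfigArrayInput {
	return dataproc.GkeNodePoolAcceleratorConfigArray{
		dataproc.GkeNodePoolAcceleratorConfigArgs{
			AcceleratorCount: pulumi.String("1"),               // counts are strings in this API
			AcceleratorType:  pulumi.String("nvidia-tesla-t4"), // hypothetical accelerator type
		},
	}
}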

type GkeNodePoolAcceleratorConfigArrayOutput added in v0.18.2

type GkeNodePoolAcceleratorConfigArrayOutput struct{ *pulumi.OutputState }

func (GkeNodePoolAcceleratorConfigArrayOutput) ElementType added in v0.18.2

func (GkeNodePoolAcceleratorConfigArrayOutput) Index added in v0.18.2

func (GkeNodePoolAcceleratorConfigArrayOutput) ToGkeNodePoolAcceleratorConfigArrayOutput added in v0.18.2

func (o GkeNodePoolAcceleratorConfigArrayOutput) ToGkeNodePoolAcceleratorConfigArrayOutput() GkeNodePoolAcceleratorConfigArrayOutput

func (GkeNodePoolAcceleratorConfigArrayOutput) ToGkeNodePoolAcceleratorConfigArrayOutputWithContext added in v0.18.2

func (o GkeNodePoolAcceleratorConfigArrayOutput) ToGkeNodePoolAcceleratorConfigArrayOutputWithContext(ctx context.Context) GkeNodePoolAcceleratorConfigArrayOutput

type GkeNodePoolAcceleratorConfigInput added in v0.18.2

type GkeNodePoolAcceleratorConfigInput interface {
	pulumi.Input

	ToGkeNodePoolAcceleratorConfigOutput() GkeNodePoolAcceleratorConfigOutput
	ToGkeNodePoolAcceleratorConfigOutputWithContext(context.Context) GkeNodePoolAcceleratorConfigOutput
}

GkeNodePoolAcceleratorConfigInput is an input type that accepts GkeNodePoolAcceleratorConfigArgs and GkeNodePoolAcceleratorConfigOutput values. You can construct a concrete instance of `GkeNodePoolAcceleratorConfigInput` via:

GkeNodePoolAcceleratorConfigArgs{...}

type GkeNodePoolAcceleratorConfigOutput added in v0.18.2

type GkeNodePoolAcceleratorConfigOutput struct{ *pulumi.OutputState }

A GkeNodePoolAcceleratorConfig represents a Hardware Accelerator request for a node pool.

func (GkeNodePoolAcceleratorConfigOutput) AcceleratorCount added in v0.18.2

The number of accelerator cards exposed to an instance.

func (GkeNodePoolAcceleratorConfigOutput) AcceleratorType added in v0.18.2

The accelerator type resource name (see GPUs on Compute Engine).

func (GkeNodePoolAcceleratorConfigOutput) ElementType added in v0.18.2

func (GkeNodePoolAcceleratorConfigOutput) GpuPartitionSize added in v0.18.2

Size of partitions to create on the GPU. Valid values are described in the NVIDIA mig user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning).

func (GkeNodePoolAcceleratorConfigOutput) ToGkeNodePoolAcceleratorConfigOutput added in v0.18.2

func (o GkeNodePoolAcceleratorConfigOutput) ToGkeNodePoolAcceleratorConfigOutput() GkeNodePoolAcceleratorConfigOutput

func (GkeNodePoolAcceleratorConfigOutput) ToGkeNodePoolAcceleratorConfigOutputWithContext added in v0.18.2

func (o GkeNodePoolAcceleratorConfigOutput) ToGkeNodePoolAcceleratorConfigOutputWithContext(ctx context.Context) GkeNodePoolAcceleratorConfigOutput

type GkeNodePoolAcceleratorConfigResponse added in v0.18.2

type GkeNodePoolAcceleratorConfigResponse struct {
	// The number of accelerator cards exposed to an instance.
	AcceleratorCount string `pulumi:"acceleratorCount"`
	// The accelerator type resource name (see GPUs on Compute Engine).
	AcceleratorType string `pulumi:"acceleratorType"`
	// Size of partitions to create on the GPU. Valid values are described in the NVIDIA mig user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning).
	GpuPartitionSize string `pulumi:"gpuPartitionSize"`
}

A GkeNodePoolAcceleratorConfig represents a Hardware Accelerator request for a node pool.

type GkeNodePoolAcceleratorConfigResponseArrayOutput added in v0.18.2

type GkeNodePoolAcceleratorConfigResponseArrayOutput struct{ *pulumi.OutputState }

func (GkeNodePoolAcceleratorConfigResponseArrayOutput) ElementType added in v0.18.2

func (GkeNodePoolAcceleratorConfigResponseArrayOutput) Index added in v0.18.2

func (GkeNodePoolAcceleratorConfigResponseArrayOutput) ToGkeNodePoolAcceleratorConfigResponseArrayOutput added in v0.18.2

func (o GkeNodePoolAcceleratorConfigResponseArrayOutput) ToGkeNodePoolAcceleratorConfigResponseArrayOutput() GkeNodePoolAcceleratorConfigResponseArrayOutput

func (GkeNodePoolAcceleratorConfigResponseArrayOutput) ToGkeNodePoolAcceleratorConfigResponseArrayOutputWithContext added in v0.18.2

func (o GkeNodePoolAcceleratorConfigResponseArrayOutput) ToGkeNodePoolAcceleratorConfigResponseArrayOutputWithContext(ctx context.Context) GkeNodePoolAcceleratorConfigResponseArrayOutput

type GkeNodePoolAcceleratorConfigResponseOutput added in v0.18.2

type GkeNodePoolAcceleratorConfigResponseOutput struct{ *pulumi.OutputState }

A GkeNodePoolAcceleratorConfig represents a Hardware Accelerator request for a node pool.

func (GkeNodePoolAcceleratorConfigResponseOutput) AcceleratorCount added in v0.18.2

The number of accelerator cards exposed to an instance.

func (GkeNodePoolAcceleratorConfigResponseOutput) AcceleratorType added in v0.18.2

The accelerator type resource name (see GPUs on Compute Engine).

func (GkeNodePoolAcceleratorConfigResponseOutput) ElementType added in v0.18.2

func (GkeNodePoolAcceleratorConfigResponseOutput) GpuPartitionSize added in v0.18.2

Size of partitions to create on the GPU. Valid values are described in the NVIDIA mig user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning).

func (GkeNodePoolAcceleratorConfigResponseOutput) ToGkeNodePoolAcceleratorConfigResponseOutput added in v0.18.2

func (o GkeNodePoolAcceleratorConfigResponseOutput) ToGkeNodePoolAcceleratorConfigResponseOutput() GkeNodePoolAcceleratorConfigResponseOutput

func (GkeNodePoolAcceleratorConfigResponseOutput) ToGkeNodePoolAcceleratorConfigResponseOutputWithContext added in v0.18.2

func (o GkeNodePoolAcceleratorConfigResponseOutput) ToGkeNodePoolAcceleratorConfigResponseOutputWithContext(ctx context.Context) GkeNodePoolAcceleratorConfigResponseOutput

type GkeNodePoolAutoscalingConfig added in v0.18.2

type GkeNodePoolAutoscalingConfig struct {
	// The maximum number of nodes in the node pool. Must be >= min_node_count, and must be > 0. Note: Quota must be sufficient to scale up the cluster.
	MaxNodeCount *int `pulumi:"maxNodeCount"`
	// The minimum number of nodes in the node pool. Must be >= 0 and <= max_node_count.
	MinNodeCount *int `pulumi:"minNodeCount"`
}

GkeNodePoolAutoscaling contains information the cluster autoscaler needs to adjust the size of the node pool to the current cluster usage.

type GkeNodePoolAutoscalingConfigArgs added in v0.18.2

type GkeNodePoolAutoscalingConfigArgs struct {
	// The maximum number of nodes in the node pool. Must be >= min_node_count, and must be > 0. Note: Quota must be sufficient to scale up the cluster.
	MaxNodeCount pulumi.IntPtrInput `pulumi:"maxNodeCount"`
	// The minimum number of nodes in the node pool. Must be >= 0 and <= max_node_count.
	MinNodeCount pulumi.IntPtrInput `pulumi:"minNodeCount"`
}

GkeNodePoolAutoscaling contains information the cluster autoscaler needs to adjust the size of the node pool to the current cluster usage.

func (GkeNodePoolAutoscalingConfigArgs) ElementType added in v0.18.2

func (GkeNodePoolAutoscalingConfigArgs) ToGkeNodePoolAutoscalingConfigOutput added in v0.18.2

func (i GkeNodePoolAutoscalingConfigArgs) ToGkeNodePoolAutoscalingConfigOutput() GkeNodePoolAutoscalingConfigOutput

func (GkeNodePoolAutoscalingConfigArgs) ToGkeNodePoolAutoscalingConfigOutputWithContext added in v0.18.2

func (i GkeNodePoolAutoscalingConfigArgs) ToGkeNodePoolAutoscalingConfigOutputWithContext(ctx context.Context) GkeNodePoolAutoscalingConfigOutput

func (GkeNodePoolAutoscalingConfigArgs) ToGkeNodePoolAutoscalingConfigPtrOutput added in v0.18.2

func (i GkeNodePoolAutoscalingConfigArgs) ToGkeNodePoolAutoscalingConfigPtrOutput() GkeNodePoolAutoscalingConfigPtrOutput

func (GkeNodePoolAutoscalingConfigArgs) ToGkeNodePoolAutoscalingConfigPtrOutputWithContext added in v0.18.2

func (i GkeNodePoolAutoscalingConfigArgs) ToGkeNodePoolAutoscalingConfigPtrOutputWithContext(ctx context.Context) GkeNodePoolAutoscalingConfigPtrOutput

type GkeNodePoolAutoscalingConfigInput added in v0.18.2

type GkeNodePoolAutoscalingConfigInput interface {
	pulumi.Input

	ToGkeNodePoolAutoscalingConfigOutput() GkeNodePoolAutoscalingConfigOutput
	ToGkeNodePoolAutoscalingConfigOutputWithContext(context.Context) GkeNodePoolAutoscalingConfigOutput
}

GkeNodePoolAutoscalingConfigInput is an input type that accepts GkeNodePoolAutoscalingConfigArgs and GkeNodePoolAutoscalingConfigOutput values. You can construct a concrete instance of `GkeNodePoolAutoscalingConfigInput` via:

GkeNodePoolAutoscalingConfigArgs{...}

type GkeNodePoolAutoscalingConfigOutput added in v0.18.2

type GkeNodePoolAutoscalingConfigOutput struct{ *pulumi.OutputState }

GkeNodePoolAutoscaling contains information the cluster autoscaler needs to adjust the size of the node pool to the current cluster usage.

func (GkeNodePoolAutoscalingConfigOutput) ElementType added in v0.18.2

func (GkeNodePoolAutoscalingConfigOutput) MaxNodeCount added in v0.18.2

The maximum number of nodes in the node pool. Must be >= min_node_count, and must be > 0. Note: Quota must be sufficient to scale up the cluster.

func (GkeNodePoolAutoscalingConfigOutput) MinNodeCount added in v0.18.2

The minimum number of nodes in the node pool. Must be >= 0 and <= max_node_count.

func (GkeNodePoolAutoscalingConfigOutput) ToGkeNodePoolAutoscalingConfigOutput added in v0.18.2

func (o GkeNodePoolAutoscalingConfigOutput) ToGkeNodePoolAutoscalingConfigOutput() GkeNodePoolAutoscalingConfigOutput

func (GkeNodePoolAutoscalingConfigOutput) ToGkeNodePoolAutoscalingConfigOutputWithContext added in v0.18.2

func (o GkeNodePoolAutoscalingConfigOutput) ToGkeNodePoolAutoscalingConfigOutputWithContext(ctx context.Context) GkeNodePoolAutoscalingConfigOutput

func (GkeNodePoolAutoscalingConfigOutput) ToGkeNodePoolAutoscalingConfigPtrOutput added in v0.18.2

func (o GkeNodePoolAutoscalingConfigOutput) ToGkeNodePoolAutoscalingConfigPtrOutput() GkeNodePoolAutoscalingConfigPtrOutput

func (GkeNodePoolAutoscalingConfigOutput) ToGkeNodePoolAutoscalingConfigPtrOutputWithContext added in v0.18.2

func (o GkeNodePoolAutoscalingConfigOutput) ToGkeNodePoolAutoscalingConfigPtrOutputWithContext(ctx context.Context) GkeNodePoolAutoscalingConfigPtrOutput

type GkeNodePoolAutoscalingConfigPtrInput added in v0.18.2

type GkeNodePoolAutoscalingConfigPtrInput interface {
	pulumi.Input

	ToGkeNodePoolAutoscalingConfigPtrOutput() GkeNodePoolAutoscalingConfigPtrOutput
	ToGkeNodePoolAutoscalingConfigPtrOutputWithContext(context.Context) GkeNodePoolAutoscalingConfigPtrOutput
}

GkeNodePoolAutoscalingConfigPtrInput is an input type that accepts GkeNodePoolAutoscalingConfigArgs, GkeNodePoolAutoscalingConfigPtr and GkeNodePoolAutoscalingConfigPtrOutput values. You can construct a concrete instance of `GkeNodePoolAutoscalingConfigPtrInput` via:

        GkeNodePoolAutoscalingConfigArgs{...}

or:

        nil
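
As a hedged sketch (using the same assumed imports as the accelerator example above), an Args literal can be used directly wherever a GkeNodePoolAutoscalingConfigPtrInput is expected; the node counts here are illustrative:

// Enable autoscaling between 0 and 5 nodes.
var autoscaling dataproc.GkeNodePoolAutoscalingConfigPtrInput = dataproc.GkeNodePoolAutoscalingConfigArgs{
	MinNodeCount: pulumi.Int(0),
	MaxNodeCount: pulumi.Int(5),
}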

func GkeNodePoolAutoscalingConfigPtr added in v0.18.2

func GkeNodePoolAutoscalingConfigPtr(v *GkeNodePoolAutoscalingConfigArgs) GkeNodePoolAutoscalingConfigPtrInput

type GkeNodePoolAutoscalingConfigPtrOutput added in v0.18.2

type GkeNodePoolAutoscalingConfigPtrOutput struct{ *pulumi.OutputState }

func (GkeNodePoolAutoscalingConfigPtrOutput) Elem added in v0.18.2

func (GkeNodePoolAutoscalingConfigPtrOutput) ElementType added in v0.18.2

func (GkeNodePoolAutoscalingConfigPtrOutput) MaxNodeCount added in v0.18.2

The maximum number of nodes in the node pool. Must be >= min_node_count, and must be > 0. Note: Quota must be sufficient to scale up the cluster.

func (GkeNodePoolAutoscalingConfigPtrOutput) MinNodeCount added in v0.18.2

The minimum number of nodes in the node pool. Must be >= 0 and <= max_node_count.

func (GkeNodePoolAutoscalingConfigPtrOutput) ToGkeNodePoolAutoscalingConfigPtrOutput added in v0.18.2

func (o GkeNodePoolAutoscalingConfigPtrOutput) ToGkeNodePoolAutoscalingConfigPtrOutput() GkeNodePoolAutoscalingConfigPtrOutput

func (GkeNodePoolAutoscalingConfigPtrOutput) ToGkeNodePoolAutoscalingConfigPtrOutputWithContext added in v0.18.2

func (o GkeNodePoolAutoscalingConfigPtrOutput) ToGkeNodePoolAutoscalingConfigPtrOutputWithContext(ctx context.Context) GkeNodePoolAutoscalingConfigPtrOutput

type GkeNodePoolAutoscalingConfigResponse added in v0.18.2

type GkeNodePoolAutoscalingConfigResponse struct {
	// The maximum number of nodes in the node pool. Must be >= min_node_count, and must be > 0. Note: Quota must be sufficient to scale up the cluster.
	MaxNodeCount int `pulumi:"maxNodeCount"`
	// The minimum number of nodes in the node pool. Must be >= 0 and <= max_node_count.
	MinNodeCount int `pulumi:"minNodeCount"`
}

GkeNodePoolAutoscaling contains information the cluster autoscaler needs to adjust the size of the node pool to the current cluster usage.

type GkeNodePoolAutoscalingConfigResponseOutput added in v0.18.2

type GkeNodePoolAutoscalingConfigResponseOutput struct{ *pulumi.OutputState }

GkeNodePoolAutoscaling contains information the cluster autoscaler needs to adjust the size of the node pool to the current cluster usage.

func (GkeNodePoolAutoscalingConfigResponseOutput) ElementType added in v0.18.2

func (GkeNodePoolAutoscalingConfigResponseOutput) MaxNodeCount added in v0.18.2

The maximum number of nodes in the node pool. Must be >= min_node_count, and must be > 0. Note: Quota must be sufficient to scale up the cluster.

func (GkeNodePoolAutoscalingConfigResponseOutput) MinNodeCount added in v0.18.2

The minimum number of nodes in the node pool. Must be >= 0 and <= max_node_count.

func (GkeNodePoolAutoscalingConfigResponseOutput) ToGkeNodePoolAutoscalingConfigResponseOutput added in v0.18.2

func (o GkeNodePoolAutoscalingConfigResponseOutput) ToGkeNodePoolAutoscalingConfigResponseOutput() GkeNodePoolAutoscalingConfigResponseOutput

func (GkeNodePoolAutoscalingConfigResponseOutput) ToGkeNodePoolAutoscalingConfigResponseOutputWithContext added in v0.18.2

func (o GkeNodePoolAutoscalingConfigResponseOutput) ToGkeNodePoolAutoscalingConfigResponseOutputWithContext(ctx context.Context) GkeNodePoolAutoscalingConfigResponseOutput

type GkeNodePoolConfig added in v0.18.2

type GkeNodePoolConfig struct {
	// Optional. The autoscaler configuration for this node pool. The autoscaler is enabled only when a valid configuration is present.
	Autoscaling *GkeNodePoolAutoscalingConfig `pulumi:"autoscaling"`
	// Optional. The node pool configuration.
	Config *GkeNodeConfig `pulumi:"config"`
	// Optional. The list of Compute Engine zones (https://cloud.google.com/compute/docs/zones#available) where node pool nodes associated with a Dataproc on GKE virtual cluster will be located. Note: All node pools associated with a virtual cluster must be located in the same region as the virtual cluster, and they must be located in the same zone within that region. If a location is not specified during node pool creation, Dataproc on GKE will choose the zone.
	Locations []string `pulumi:"locations"`
}

The configuration of a GKE node pool used by a Dataproc-on-GKE cluster (https://cloud.google.com/dataproc/docs/concepts/jobs/dataproc-gke#create-a-dataproc-on-gke-cluster).

type GkeNodePoolConfigArgs added in v0.18.2

type GkeNodePoolConfigArgs struct {
	// Optional. The autoscaler configuration for this node pool. The autoscaler is enabled only when a valid configuration is present.
	Autoscaling GkeNodePoolAutoscalingConfigPtrInput `pulumi:"autoscaling"`
	// Optional. The node pool configuration.
	Config GkeNodeConfigPtrInput `pulumi:"config"`
	// Optional. The list of Compute Engine zones (https://cloud.google.com/compute/docs/zones#available) where node pool nodes associated with a Dataproc on GKE virtual cluster will be located. Note: All node pools associated with a virtual cluster must be located in the same region as the virtual cluster, and they must be located in the same zone within that region. If a location is not specified during node pool creation, Dataproc on GKE will choose the zone.
	Locations pulumi.StringArrayInput `pulumi:"locations"`
}

The configuration of a GKE node pool used by a Dataproc-on-GKE cluster (https://cloud.google.com/dataproc/docs/concepts/jobs/dataproc-gke#create-a-dataproc-on-gke-cluster).

func (GkeNodePoolConfigArgs) ElementType added in v0.18.2

func (GkeNodePoolConfigArgs) ElementType() reflect.Type

func (GkeNodePoolConfigArgs) ToGkeNodePoolConfigOutput added in v0.18.2

func (i GkeNodePoolConfigArgs) ToGkeNodePoolConfigOutput() GkeNodePoolConfigOutput

func (GkeNodePoolConfigArgs) ToGkeNodePoolConfigOutputWithContext added in v0.18.2

func (i GkeNodePoolConfigArgs) ToGkeNodePoolConfigOutputWithContext(ctx context.Context) GkeNodePoolConfigOutput

func (GkeNodePoolConfigArgs) ToGkeNodePoolConfigPtrOutput added in v0.18.2

func (i GkeNodePoolConfigArgs) ToGkeNodePoolConfigPtrOutput() GkeNodePoolConfigPtrOutput

func (GkeNodePoolConfigArgs) ToGkeNodePoolConfigPtrOutputWithContext added in v0.18.2

func (i GkeNodePoolConfigArgs) ToGkeNodePoolConfigPtrOutputWithContext(ctx context.Context) GkeNodePoolConfigPtrOutput

type GkeNodePoolConfigInput added in v0.18.2

type GkeNodePoolConfigInput interface {
	pulumi.Input

	ToGkeNodePoolConfigOutput() GkeNodePoolConfigOutput
	ToGkeNodePoolConfigOutputWithContext(context.Context) GkeNodePoolConfigOutput
}

GkeNodePoolConfigInput is an input type that accepts GkeNodePoolConfigArgs and GkeNodePoolConfigOutput values. You can construct a concrete instance of `GkeNodePoolConfigInput` via:

GkeNodePoolConfigArgs{...}

type GkeNodePoolConfigOutput added in v0.18.2

type GkeNodePoolConfigOutput struct{ *pulumi.OutputState }

The configuration of a GKE node pool used by a Dataproc-on-GKE cluster (https://cloud.google.com/dataproc/docs/concepts/jobs/dataproc-gke#create-a-dataproc-on-gke-cluster).

func (GkeNodePoolConfigOutput) Autoscaling added in v0.18.2

Optional. The autoscaler configuration for this node pool. The autoscaler is enabled only when a valid configuration is present.

func (GkeNodePoolConfigOutput) Config added in v0.18.2

Optional. The node pool configuration.

func (GkeNodePoolConfigOutput) ElementType added in v0.18.2

func (GkeNodePoolConfigOutput) ElementType() reflect.Type

func (GkeNodePoolConfigOutput) Locations added in v0.18.2

Optional. The list of Compute Engine zones (https://cloud.google.com/compute/docs/zones#available) where node pool nodes associated with a Dataproc on GKE virtual cluster will be located. Note: All node pools associated with a virtual cluster must be located in the same region as the virtual cluster, and they must be located in the same zone within that region. If a location is not specified during node pool creation, Dataproc on GKE will choose the zone.

func (GkeNodePoolConfigOutput) ToGkeNodePoolConfigOutput added in v0.18.2

func (o GkeNodePoolConfigOutput) ToGkeNodePoolConfigOutput() GkeNodePoolConfigOutput

func (GkeNodePoolConfigOutput) ToGkeNodePoolConfigOutputWithContext added in v0.18.2

func (o GkeNodePoolConfigOutput) ToGkeNodePoolConfigOutputWithContext(ctx context.Context) GkeNodePoolConfigOutput

func (GkeNodePoolConfigOutput) ToGkeNodePoolConfigPtrOutput added in v0.18.2

func (o GkeNodePoolConfigOutput) ToGkeNodePoolConfigPtrOutput() GkeNodePoolConfigPtrOutput

func (GkeNodePoolConfigOutput) ToGkeNodePoolConfigPtrOutputWithContext added in v0.18.2

func (o GkeNodePoolConfigOutput) ToGkeNodePoolConfigPtrOutputWithContext(ctx context.Context) GkeNodePoolConfigPtrOutput

type GkeNodePoolConfigPtrInput added in v0.18.2

type GkeNodePoolConfigPtrInput interface {
	pulumi.Input

	ToGkeNodePoolConfigPtrOutput() GkeNodePoolConfigPtrOutput
	ToGkeNodePoolConfigPtrOutputWithContext(context.Context) GkeNodePoolConfigPtrOutput
}

GkeNodePoolConfigPtrInput is an input type that accepts GkeNodePoolConfigArgs, GkeNodePoolConfigPtr and GkeNodePoolConfigPtrOutput values. You can construct a concrete instance of `GkeNodePoolConfigPtrInput` via:

        GkeNodePoolConfigArgs{...}

or:

        nil
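
A minimal sketch of a node pool configuration, again with the same assumed imports. The GkeNodeConfigArgs field names are inferred from GkeNodeConfigResponse above, and the machine type and zone are placeholders:

// A node pool with autoscaling, a machine type, and a single zone.
var nodePoolConfig dataproc.GkeNodePoolConfigPtrInput = dataproc.GkeNodePoolConfigArgs{
	Autoscaling: dataproc.GkeNodePoolAutoscalingConfigArgs{
		MinNodeCount: pulumi.Int(1),
		MaxNodeCount: pulumi.Int(10),
	},
	Config: dataproc.GkeNodeConfigArgs{
		MachineType: pulumi.String("n1-standard-4"), // placeholder machine type
	},
	Locations: pulumi.StringArray{pulumi.String("us-central1-a")}, // placeholder zone
}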

func GkeNodePoolConfigPtr added in v0.18.2

func GkeNodePoolConfigPtr(v *GkeNodePoolConfigArgs) GkeNodePoolConfigPtrInput

type GkeNodePoolConfigPtrOutput added in v0.18.2

type GkeNodePoolConfigPtrOutput struct{ *pulumi.OutputState }

func (GkeNodePoolConfigPtrOutput) Autoscaling added in v0.18.2

Optional. The autoscaler configuration for this node pool. The autoscaler is enabled only when a valid configuration is present.

func (GkeNodePoolConfigPtrOutput) Config added in v0.18.2

Optional. The node pool configuration.

func (GkeNodePoolConfigPtrOutput) Elem added in v0.18.2

func (GkeNodePoolConfigPtrOutput) ElementType added in v0.18.2

func (GkeNodePoolConfigPtrOutput) ElementType() reflect.Type

func (GkeNodePoolConfigPtrOutput) Locations added in v0.18.2

Optional. The list of Compute Engine zones (https://cloud.google.com/compute/docs/zones#available) where node pool nodes associated with a Dataproc on GKE virtual cluster will be located. Note: All node pools associated with a virtual cluster must be located in the same region as the virtual cluster, and they must be located in the same zone within that region. If a location is not specified during node pool creation, Dataproc on GKE will choose the zone.

func (GkeNodePoolConfigPtrOutput) ToGkeNodePoolConfigPtrOutput added in v0.18.2

func (o GkeNodePoolConfigPtrOutput) ToGkeNodePoolConfigPtrOutput() GkeNodePoolConfigPtrOutput

func (GkeNodePoolConfigPtrOutput) ToGkeNodePoolConfigPtrOutputWithContext added in v0.18.2

func (o GkeNodePoolConfigPtrOutput) ToGkeNodePoolConfigPtrOutputWithContext(ctx context.Context) GkeNodePoolConfigPtrOutput

type GkeNodePoolConfigResponse added in v0.18.2

type GkeNodePoolConfigResponse struct {
	// Optional. The autoscaler configuration for this node pool. The autoscaler is enabled only when a valid configuration is present.
	Autoscaling GkeNodePoolAutoscalingConfigResponse `pulumi:"autoscaling"`
	// Optional. The node pool configuration.
	Config GkeNodeConfigResponse `pulumi:"config"`
	// Optional. The list of Compute Engine zones (https://cloud.google.com/compute/docs/zones#available) where node pool nodes associated with a Dataproc on GKE virtual cluster will be located. Note: All node pools associated with a virtual cluster must be located in the same region as the virtual cluster, and they must be located in the same zone within that region. If a location is not specified during node pool creation, Dataproc on GKE will choose the zone.
	Locations []string `pulumi:"locations"`
}

The configuration of a GKE node pool used by a Dataproc-on-GKE cluster (https://cloud.google.com/dataproc/docs/concepts/jobs/dataproc-gke#create-a-dataproc-on-gke-cluster).

type GkeNodePoolConfigResponseOutput added in v0.18.2

type GkeNodePoolConfigResponseOutput struct{ *pulumi.OutputState }

The configuration of a GKE node pool used by a Dataproc-on-GKE cluster (https://cloud.google.com/dataproc/docs/concepts/jobs/dataproc-gke#create-a-dataproc-on-gke-cluster).

func (GkeNodePoolConfigResponseOutput) Autoscaling added in v0.18.2

Optional. The autoscaler configuration for this node pool. The autoscaler is enabled only when a valid configuration is present.

func (GkeNodePoolConfigResponseOutput) Config added in v0.18.2

Optional. The node pool configuration.

func (GkeNodePoolConfigResponseOutput) ElementType added in v0.18.2

func (GkeNodePoolConfigResponseOutput) Locations added in v0.18.2

Optional. The list of Compute Engine zones (https://cloud.google.com/compute/docs/zones#available) where node pool nodes associated with a Dataproc on GKE virtual cluster will be located. Note: All node pools associated with a virtual cluster must be located in the same region as the virtual cluster, and they must be located in the same zone within that region. If a location is not specified during node pool creation, Dataproc on GKE will choose the zone.

func (GkeNodePoolConfigResponseOutput) ToGkeNodePoolConfigResponseOutput added in v0.18.2

func (o GkeNodePoolConfigResponseOutput) ToGkeNodePoolConfigResponseOutput() GkeNodePoolConfigResponseOutput

func (GkeNodePoolConfigResponseOutput) ToGkeNodePoolConfigResponseOutputWithContext added in v0.18.2

func (o GkeNodePoolConfigResponseOutput) ToGkeNodePoolConfigResponseOutputWithContext(ctx context.Context) GkeNodePoolConfigResponseOutput

type GkeNodePoolTarget added in v0.18.2

type GkeNodePoolTarget struct {
	// The target GKE node pool. Format: 'projects/{project}/locations/{location}/clusters/{cluster}/nodePools/{node_pool}'
	NodePool string `pulumi:"nodePool"`
	// Input only. The configuration for the GKE node pool. If specified, Dataproc attempts to create a node pool with the specified shape. If one with the same name already exists, it is verified against all specified fields. If a field differs, the virtual cluster creation will fail. If omitted, any node pool with the specified name is used. If a node pool with the specified name does not exist, Dataproc creates a node pool with default values. This is an input-only field. It will not be returned by the API.
	NodePoolConfig *GkeNodePoolConfig `pulumi:"nodePoolConfig"`
	// The roles associated with the GKE node pool.
	Roles []GkeNodePoolTargetRolesItem `pulumi:"roles"`
}

GKE node pools that Dataproc workloads run on.

type GkeNodePoolTargetArgs added in v0.18.2

type GkeNodePoolTargetArgs struct {
	// The target GKE node pool. Format: 'projects/{project}/locations/{location}/clusters/{cluster}/nodePools/{node_pool}'
	NodePool pulumi.StringInput `pulumi:"nodePool"`
	// Input only. The configuration for the GKE node pool. If specified, Dataproc attempts to create a node pool with the specified shape. If one with the same name already exists, it is verified against all specified fields. If a field differs, the virtual cluster creation will fail. If omitted, any node pool with the specified name is used. If a node pool with the specified name does not exist, Dataproc creates a node pool with default values. This is an input-only field. It will not be returned by the API.
	NodePoolConfig GkeNodePoolConfigPtrInput `pulumi:"nodePoolConfig"`
	// The roles associated with the GKE node pool.
	Roles GkeNodePoolTargetRolesItemArrayInput `pulumi:"roles"`
}

GKE node pools that Dataproc workloads run on.

func (GkeNodePoolTargetArgs) ElementType added in v0.18.2

func (GkeNodePoolTargetArgs) ElementType() reflect.Type

func (GkeNodePoolTargetArgs) ToGkeNodePoolTargetOutput added in v0.18.2

func (i GkeNodePoolTargetArgs) ToGkeNodePoolTargetOutput() GkeNodePoolTargetOutput

func (GkeNodePoolTargetArgs) ToGkeNodePoolTargetOutputWithContext added in v0.18.2

func (i GkeNodePoolTargetArgs) ToGkeNodePoolTargetOutputWithContext(ctx context.Context) GkeNodePoolTargetOutput

type GkeNodePoolTargetArray added in v0.18.2

type GkeNodePoolTargetArray []GkeNodePoolTargetInput

func (GkeNodePoolTargetArray) ElementType added in v0.18.2

func (GkeNodePoolTargetArray) ElementType() reflect.Type

func (GkeNodePoolTargetArray) ToGkeNodePoolTargetArrayOutput added in v0.18.2

func (i GkeNodePoolTargetArray) ToGkeNodePoolTargetArrayOutput() GkeNodePoolTargetArrayOutput

func (GkeNodePoolTargetArray) ToGkeNodePoolTargetArrayOutputWithContext added in v0.18.2

func (i GkeNodePoolTargetArray) ToGkeNodePoolTargetArrayOutputWithContext(ctx context.Context) GkeNodePoolTargetArrayOutput

type GkeNodePoolTargetArrayInput added in v0.18.2

type GkeNodePoolTargetArrayInput interface {
	pulumi.Input

	ToGkeNodePoolTargetArrayOutput() GkeNodePoolTargetArrayOutput
	ToGkeNodePoolTargetArrayOutputWithContext(context.Context) GkeNodePoolTargetArrayOutput
}

GkeNodePoolTargetArrayInput is an input type that accepts GkeNodePoolTargetArray and GkeNodePoolTargetArrayOutput values. You can construct a concrete instance of `GkeNodePoolTargetArrayInput` via:

GkeNodePoolTargetArray{ GkeNodePoolTargetArgs{...} }
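
A short, hedged sketch of a target list (same assumed imports as above); the project, location, cluster, and node pool names are placeholders:

// One target node pool carrying the DEFAULT role.
var targets dataproc.GkeNodePoolTargetArrayInput = dataproc.GkeNodePoolTargetArray{
	dataproc.GkeNodePoolTargetArgs{
		NodePool: pulumi.String("projects/my-project/locations/us-central1/clusters/my-gke-cluster/nodePools/dp-default"),
		Roles: dataproc.GkeNodePoolTargetRolesItemArray{
			dataproc.GkeNodePoolTargetRolesItem("DEFAULT"),
		},
	},
}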

type GkeNodePoolTargetArrayOutput added in v0.18.2

type GkeNodePoolTargetArrayOutput struct{ *pulumi.OutputState }

func (GkeNodePoolTargetArrayOutput) ElementType added in v0.18.2

func (GkeNodePoolTargetArrayOutput) Index added in v0.18.2

func (GkeNodePoolTargetArrayOutput) ToGkeNodePoolTargetArrayOutput added in v0.18.2

func (o GkeNodePoolTargetArrayOutput) ToGkeNodePoolTargetArrayOutput() GkeNodePoolTargetArrayOutput

func (GkeNodePoolTargetArrayOutput) ToGkeNodePoolTargetArrayOutputWithContext added in v0.18.2

func (o GkeNodePoolTargetArrayOutput) ToGkeNodePoolTargetArrayOutputWithContext(ctx context.Context) GkeNodePoolTargetArrayOutput

type GkeNodePoolTargetInput added in v0.18.2

type GkeNodePoolTargetInput interface {
	pulumi.Input

	ToGkeNodePoolTargetOutput() GkeNodePoolTargetOutput
	ToGkeNodePoolTargetOutputWithContext(context.Context) GkeNodePoolTargetOutput
}

GkeNodePoolTargetInput is an input type that accepts GkeNodePoolTargetArgs and GkeNodePoolTargetOutput values. You can construct a concrete instance of `GkeNodePoolTargetInput` via:

GkeNodePoolTargetArgs{...}

type GkeNodePoolTargetOutput added in v0.18.2

type GkeNodePoolTargetOutput struct{ *pulumi.OutputState }

GKE node pools that Dataproc workloads run on.

func (GkeNodePoolTargetOutput) ElementType added in v0.18.2

func (GkeNodePoolTargetOutput) ElementType() reflect.Type

func (GkeNodePoolTargetOutput) NodePool added in v0.18.2

The target GKE node pool. Format: 'projects/{project}/locations/{location}/clusters/{cluster}/nodePools/{node_pool}'

func (GkeNodePoolTargetOutput) NodePoolConfig added in v0.18.2

Input only. The configuration for the GKE node pool. If specified, Dataproc attempts to create a node pool with the specified shape. If one with the same name already exists, it is verified against all specified fields. If a field differs, the virtual cluster creation will fail. If omitted, any node pool with the specified name is used. If a node pool with the specified name does not exist, Dataproc creates a node pool with default values. This is an input-only field. It will not be returned by the API.

func (GkeNodePoolTargetOutput) Roles added in v0.18.2

The roles associated with the GKE node pool.

func (GkeNodePoolTargetOutput) ToGkeNodePoolTargetOutput added in v0.18.2

func (o GkeNodePoolTargetOutput) ToGkeNodePoolTargetOutput() GkeNodePoolTargetOutput

func (GkeNodePoolTargetOutput) ToGkeNodePoolTargetOutputWithContext added in v0.18.2

func (o GkeNodePoolTargetOutput) ToGkeNodePoolTargetOutputWithContext(ctx context.Context) GkeNodePoolTargetOutput

type GkeNodePoolTargetResponse added in v0.18.2

type GkeNodePoolTargetResponse struct {
	// The target GKE node pool. Format: 'projects/{project}/locations/{location}/clusters/{cluster}/nodePools/{node_pool}'
	NodePool string `pulumi:"nodePool"`
	// Input only. The configuration for the GKE node pool. If specified, Dataproc attempts to create a node pool with the specified shape. If one with the same name already exists, it is verified against all specified fields. If a field differs, the virtual cluster creation will fail. If omitted, any node pool with the specified name is used. If a node pool with the specified name does not exist, Dataproc creates a node pool with default values. This is an input-only field. It will not be returned by the API.
	NodePoolConfig GkeNodePoolConfigResponse `pulumi:"nodePoolConfig"`
	// The roles associated with the GKE node pool.
	Roles []string `pulumi:"roles"`
}

GKE node pools that Dataproc workloads run on.

type GkeNodePoolTargetResponseArrayOutput added in v0.18.2

type GkeNodePoolTargetResponseArrayOutput struct{ *pulumi.OutputState }

func (GkeNodePoolTargetResponseArrayOutput) ElementType added in v0.18.2

func (GkeNodePoolTargetResponseArrayOutput) Index added in v0.18.2

func (GkeNodePoolTargetResponseArrayOutput) ToGkeNodePoolTargetResponseArrayOutput added in v0.18.2

func (o GkeNodePoolTargetResponseArrayOutput) ToGkeNodePoolTargetResponseArrayOutput() GkeNodePoolTargetResponseArrayOutput

func (GkeNodePoolTargetResponseArrayOutput) ToGkeNodePoolTargetResponseArrayOutputWithContext added in v0.18.2

func (o GkeNodePoolTargetResponseArrayOutput) ToGkeNodePoolTargetResponseArrayOutputWithContext(ctx context.Context) GkeNodePoolTargetResponseArrayOutput

type GkeNodePoolTargetResponseOutput added in v0.18.2

type GkeNodePoolTargetResponseOutput struct{ *pulumi.OutputState }

GKE node pools that Dataproc workloads run on.

func (GkeNodePoolTargetResponseOutput) ElementType added in v0.18.2

func (GkeNodePoolTargetResponseOutput) NodePool added in v0.18.2

The target GKE node pool. Format: 'projects/{project}/locations/{location}/clusters/{cluster}/nodePools/{node_pool}'

func (GkeNodePoolTargetResponseOutput) NodePoolConfig added in v0.18.2

Input only. The configuration for the GKE node pool. If specified, Dataproc attempts to create a node pool with the specified shape. If one with the same name already exists, it is verified against all specified fields. If a field differs, the virtual cluster creation will fail. If omitted, any node pool with the specified name is used. If a node pool with the specified name does not exist, Dataproc creates a node pool with default values. This is an input-only field. It will not be returned by the API.

func (GkeNodePoolTargetResponseOutput) Roles added in v0.18.2

The roles associated with the GKE node pool.

func (GkeNodePoolTargetResponseOutput) ToGkeNodePoolTargetResponseOutput added in v0.18.2

func (o GkeNodePoolTargetResponseOutput) ToGkeNodePoolTargetResponseOutput() GkeNodePoolTargetResponseOutput

func (GkeNodePoolTargetResponseOutput) ToGkeNodePoolTargetResponseOutputWithContext added in v0.18.2

func (o GkeNodePoolTargetResponseOutput) ToGkeNodePoolTargetResponseOutputWithContext(ctx context.Context) GkeNodePoolTargetResponseOutput

type GkeNodePoolTargetRolesItem added in v0.18.2

type GkeNodePoolTargetRolesItem string

func (GkeNodePoolTargetRolesItem) ElementType added in v0.18.2

func (GkeNodePoolTargetRolesItem) ElementType() reflect.Type

func (GkeNodePoolTargetRolesItem) ToGkeNodePoolTargetRolesItemOutput added in v0.18.2

func (e GkeNodePoolTargetRolesItem) ToGkeNodePoolTargetRolesItemOutput() GkeNodePoolTargetRolesItemOutput

func (GkeNodePoolTargetRolesItem) ToGkeNodePoolTargetRolesItemOutputWithContext added in v0.18.2

func (e GkeNodePoolTargetRolesItem) ToGkeNodePoolTargetRolesItemOutputWithContext(ctx context.Context) GkeNodePoolTargetRolesItemOutput

func (GkeNodePoolTargetRolesItem) ToGkeNodePoolTargetRolesItemPtrOutput added in v0.18.2

func (e GkeNodePoolTargetRolesItem) ToGkeNodePoolTargetRolesItemPtrOutput() GkeNodePoolTargetRolesItemPtrOutput

func (GkeNodePoolTargetRolesItem) ToGkeNodePoolTargetRolesItemPtrOutputWithContext added in v0.18.2

func (e GkeNodePoolTargetRolesItem) ToGkeNodePoolTargetRolesItemPtrOutputWithContext(ctx context.Context) GkeNodePoolTargetRolesItemPtrOutput

func (GkeNodePoolTargetRolesItem) ToStringOutput added in v0.18.2

func (e GkeNodePoolTargetRolesItem) ToStringOutput() pulumi.StringOutput

func (GkeNodePoolTargetRolesItem) ToStringOutputWithContext added in v0.18.2

func (e GkeNodePoolTargetRolesItem) ToStringOutputWithContext(ctx context.Context) pulumi.StringOutput

func (GkeNodePoolTargetRolesItem) ToStringPtrOutput added in v0.18.2

func (e GkeNodePoolTargetRolesItem) ToStringPtrOutput() pulumi.StringPtrOutput

func (GkeNodePoolTargetRolesItem) ToStringPtrOutputWithContext added in v0.18.2

func (e GkeNodePoolTargetRolesItem) ToStringPtrOutputWithContext(ctx context.Context) pulumi.StringPtrOutput

type GkeNodePoolTargetRolesItemArray added in v0.18.2

type GkeNodePoolTargetRolesItemArray []GkeNodePoolTargetRolesItem

func (GkeNodePoolTargetRolesItemArray) ElementType added in v0.18.2

func (GkeNodePoolTargetRolesItemArray) ToGkeNodePoolTargetRolesItemArrayOutput added in v0.18.2

func (i GkeNodePoolTargetRolesItemArray) ToGkeNodePoolTargetRolesItemArrayOutput() GkeNodePoolTargetRolesItemArrayOutput

func (GkeNodePoolTargetRolesItemArray) ToGkeNodePoolTargetRolesItemArrayOutputWithContext added in v0.18.2

func (i GkeNodePoolTargetRolesItemArray) ToGkeNodePoolTargetRolesItemArrayOutputWithContext(ctx context.Context) GkeNodePoolTargetRolesItemArrayOutput

type GkeNodePoolTargetRolesItemArrayInput added in v0.18.2

type GkeNodePoolTargetRolesItemArrayInput interface {
	pulumi.Input

	ToGkeNodePoolTargetRolesItemArrayOutput() GkeNodePoolTargetRolesItemArrayOutput
	ToGkeNodePoolTargetRolesItemArrayOutputWithContext(context.Context) GkeNodePoolTargetRolesItemArrayOutput
}

GkeNodePoolTargetRolesItemArrayInput is an input type that accepts GkeNodePoolTargetRolesItemArray and GkeNodePoolTargetRolesItemArrayOutput values. You can construct a concrete instance of `GkeNodePoolTargetRolesItemArrayInput` via:

GkeNodePoolTargetRolesItemArray{ GkeNodePoolTargetRolesItemArgs{...} }
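
Because GkeNodePoolTargetRolesItem is a string-based enum (see its definition above), a roles array can also be built by converting plain role strings; a small sketch with the same assumed imports:

// Role values are string constants such as "DEFAULT" or "SPARK_EXECUTOR".
var roles dataproc.GkeNodePoolTargetRolesItemArrayInput = dataproc.GkeNodePoolTargetRolesItemArray{
	dataproc.GkeNodePoolTargetRolesItem("DEFAULT"),
	dataproc.GkeNodePoolTargetRolesItem("SPARK_EXECUTOR"),
}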

type GkeNodePoolTargetRolesItemArrayOutput added in v0.18.2

type GkeNodePoolTargetRolesItemArrayOutput struct{ *pulumi.OutputState }

func (GkeNodePoolTargetRolesItemArrayOutput) ElementType added in v0.18.2

func (GkeNodePoolTargetRolesItemArrayOutput) Index added in v0.18.2

func (GkeNodePoolTargetRolesItemArrayOutput) ToGkeNodePoolTargetRolesItemArrayOutput added in v0.18.2

func (o GkeNodePoolTargetRolesItemArrayOutput) ToGkeNodePoolTargetRolesItemArrayOutput() GkeNodePoolTargetRolesItemArrayOutput

func (GkeNodePoolTargetRolesItemArrayOutput) ToGkeNodePoolTargetRolesItemArrayOutputWithContext added in v0.18.2

func (o GkeNodePoolTargetRolesItemArrayOutput) ToGkeNodePoolTargetRolesItemArrayOutputWithContext(ctx context.Context) GkeNodePoolTargetRolesItemArrayOutput

type GkeNodePoolTargetRolesItemInput added in v0.18.2

type GkeNodePoolTargetRolesItemInput interface {
	pulumi.Input

	ToGkeNodePoolTargetRolesItemOutput() GkeNodePoolTargetRolesItemOutput
	ToGkeNodePoolTargetRolesItemOutputWithContext(context.Context) GkeNodePoolTargetRolesItemOutput
}

GkeNodePoolTargetRolesItemInput is an input type that accepts GkeNodePoolTargetRolesItemArgs and GkeNodePoolTargetRolesItemOutput values. You can construct a concrete instance of `GkeNodePoolTargetRolesItemInput` via:

GkeNodePoolTargetRolesItemArgs{...}

type GkeNodePoolTargetRolesItemOutput added in v0.18.2

type GkeNodePoolTargetRolesItemOutput struct{ *pulumi.OutputState }

func (GkeNodePoolTargetRolesItemOutput) ElementType added in v0.18.2

func (GkeNodePoolTargetRolesItemOutput) ToGkeNodePoolTargetRolesItemOutput added in v0.18.2

func (o GkeNodePoolTargetRolesItemOutput) ToGkeNodePoolTargetRolesItemOutput() GkeNodePoolTargetRolesItemOutput

func (GkeNodePoolTargetRolesItemOutput) ToGkeNodePoolTargetRolesItemOutputWithContext added in v0.18.2

func (o GkeNodePoolTargetRolesItemOutput) ToGkeNodePoolTargetRolesItemOutputWithContext(ctx context.Context) GkeNodePoolTargetRolesItemOutput

func (GkeNodePoolTargetRolesItemOutput) ToGkeNodePoolTargetRolesItemPtrOutput added in v0.18.2

func (o GkeNodePoolTargetRolesItemOutput) ToGkeNodePoolTargetRolesItemPtrOutput() GkeNodePoolTargetRolesItemPtrOutput

func (GkeNodePoolTargetRolesItemOutput) ToGkeNodePoolTargetRolesItemPtrOutputWithContext added in v0.18.2

func (o GkeNodePoolTargetRolesItemOutput) ToGkeNodePoolTargetRolesItemPtrOutputWithContext(ctx context.Context) GkeNodePoolTargetRolesItemPtrOutput

func (GkeNodePoolTargetRolesItemOutput) ToStringOutput added in v0.18.2

func (GkeNodePoolTargetRolesItemOutput) ToStringOutputWithContext added in v0.18.2

func (o GkeNodePoolTargetRolesItemOutput) ToStringOutputWithContext(ctx context.Context) pulumi.StringOutput

func (GkeNodePoolTargetRolesItemOutput) ToStringPtrOutput added in v0.18.2

func (GkeNodePoolTargetRolesItemOutput) ToStringPtrOutputWithContext added in v0.18.2

func (o GkeNodePoolTargetRolesItemOutput) ToStringPtrOutputWithContext(ctx context.Context) pulumi.StringPtrOutput

type GkeNodePoolTargetRolesItemPtrInput added in v0.18.2

type GkeNodePoolTargetRolesItemPtrInput interface {
	pulumi.Input

	ToGkeNodePoolTargetRolesItemPtrOutput() GkeNodePoolTargetRolesItemPtrOutput
	ToGkeNodePoolTargetRolesItemPtrOutputWithContext(context.Context) GkeNodePoolTargetRolesItemPtrOutput
}

func GkeNodePoolTargetRolesItemPtr added in v0.18.2

func GkeNodePoolTargetRolesItemPtr(v string) GkeNodePoolTargetRolesItemPtrInput

type GkeNodePoolTargetRolesItemPtrOutput added in v0.18.2

type GkeNodePoolTargetRolesItemPtrOutput struct{ *pulumi.OutputState }

func (GkeNodePoolTargetRolesItemPtrOutput) Elem added in v0.18.2

func (GkeNodePoolTargetRolesItemPtrOutput) ElementType added in v0.18.2

func (GkeNodePoolTargetRolesItemPtrOutput) ToGkeNodePoolTargetRolesItemPtrOutput added in v0.18.2

func (o GkeNodePoolTargetRolesItemPtrOutput) ToGkeNodePoolTargetRolesItemPtrOutput() GkeNodePoolTargetRolesItemPtrOutput

func (GkeNodePoolTargetRolesItemPtrOutput) ToGkeNodePoolTargetRolesItemPtrOutputWithContext added in v0.18.2

func (o GkeNodePoolTargetRolesItemPtrOutput) ToGkeNodePoolTargetRolesItemPtrOutputWithContext(ctx context.Context) GkeNodePoolTargetRolesItemPtrOutput

func (GkeNodePoolTargetRolesItemPtrOutput) ToStringPtrOutput added in v0.18.2

func (GkeNodePoolTargetRolesItemPtrOutput) ToStringPtrOutputWithContext added in v0.18.2

func (o GkeNodePoolTargetRolesItemPtrOutput) ToStringPtrOutputWithContext(ctx context.Context) pulumi.StringPtrOutput

type GoogleCloudDataprocV1WorkflowTemplateEncryptionConfig added in v0.32.0

type GoogleCloudDataprocV1WorkflowTemplateEncryptionConfig struct {
	// Optional. The Cloud KMS key name to use for encrypting customer core content.
	KmsKey *string `pulumi:"kmsKey"`
}

Encryption settings for encrypting customer core content.

type GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigArgs added in v0.32.0

type GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigArgs struct {
	// Optional. The Cloud KMS key name to use for encrypting customer core content.
	KmsKey pulumi.StringPtrInput `pulumi:"kmsKey"`
}

Encryption settings for encrypting customer core content.

func (GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigArgs) ElementType added in v0.32.0

func (GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigArgs) ToGoogleCloudDataprocV1WorkflowTemplateEncryptionConfigOutput added in v0.32.0

func (GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigArgs) ToGoogleCloudDataprocV1WorkflowTemplateEncryptionConfigOutputWithContext added in v0.32.0

func (i GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigArgs) ToGoogleCloudDataprocV1WorkflowTemplateEncryptionConfigOutputWithContext(ctx context.Context) GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigOutput

func (GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigArgs) ToGoogleCloudDataprocV1WorkflowTemplateEncryptionConfigPtrOutput added in v0.32.0

func (GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigArgs) ToGoogleCloudDataprocV1WorkflowTemplateEncryptionConfigPtrOutputWithContext added in v0.32.0

func (i GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigArgs) ToGoogleCloudDataprocV1WorkflowTemplateEncryptionConfigPtrOutputWithContext(ctx context.Context) GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigPtrOutput

type GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigInput added in v0.32.0

type GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigInput interface {
	pulumi.Input

	ToGoogleCloudDataprocV1WorkflowTemplateEncryptionConfigOutput() GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigOutput
	ToGoogleCloudDataprocV1WorkflowTemplateEncryptionConfigOutputWithContext(context.Context) GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigOutput
}

GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigInput is an input type that accepts GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigArgs and GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigOutput values. You can construct a concrete instance of `GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigInput` via:

GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigArgs{...}

type GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigOutput added in v0.32.0

type GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigOutput struct{ *pulumi.OutputState }

Encryption settings for encrypting customer core content.

func (GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigOutput) ElementType added in v0.32.0

func (GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigOutput) KmsKey added in v0.32.0

Optional. The Cloud KMS key name to use for encrypting customer core content.

func (GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigOutput) ToGoogleCloudDataprocV1WorkflowTemplateEncryptionConfigOutput added in v0.32.0

func (GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigOutput) ToGoogleCloudDataprocV1WorkflowTemplateEncryptionConfigOutputWithContext added in v0.32.0

func (o GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigOutput) ToGoogleCloudDataprocV1WorkflowTemplateEncryptionConfigOutputWithContext(ctx context.Context) GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigOutput

func (GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigOutput) ToGoogleCloudDataprocV1WorkflowTemplateEncryptionConfigPtrOutput added in v0.32.0

func (GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigOutput) ToGoogleCloudDataprocV1WorkflowTemplateEncryptionConfigPtrOutputWithContext added in v0.32.0

func (o GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigOutput) ToGoogleCloudDataprocV1WorkflowTemplateEncryptionConfigPtrOutputWithContext(ctx context.Context) GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigPtrOutput

type GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigPtrInput added in v0.32.0

type GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigPtrInput interface {
	pulumi.Input

	ToGoogleCloudDataprocV1WorkflowTemplateEncryptionConfigPtrOutput() GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigPtrOutput
	ToGoogleCloudDataprocV1WorkflowTemplateEncryptionConfigPtrOutputWithContext(context.Context) GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigPtrOutput
}

GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigPtrInput is an input type that accepts GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigArgs, GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigPtr and GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigPtrOutput values. You can construct a concrete instance of `GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigPtrInput` via:

        GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigArgs{...}

or:

        nil
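
A hedged sketch (same assumed imports as above); the KMS key resource name is a placeholder:

// Encrypt workflow template content with a customer-managed key, or pass nil to leave it unset.
var encryption dataproc.GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigPtrInput = dataproc.GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigArgs{
	KmsKey: pulumi.String("projects/my-project/locations/global/keyRings/my-ring/cryptoKeys/my-key"),
}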

type GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigPtrOutput added in v0.32.0

type GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigPtrOutput struct{ *pulumi.OutputState }

func (GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigPtrOutput) Elem added in v0.32.0

func (GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigPtrOutput) ElementType added in v0.32.0

func (GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigPtrOutput) KmsKey added in v0.32.0

Optional. The Cloud KMS key name to use for encrypting customer core content.

func (GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigPtrOutput) ToGoogleCloudDataprocV1WorkflowTemplateEncryptionConfigPtrOutput added in v0.32.0

func (GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigPtrOutput) ToGoogleCloudDataprocV1WorkflowTemplateEncryptionConfigPtrOutputWithContext added in v0.32.0

func (o GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigPtrOutput) ToGoogleCloudDataprocV1WorkflowTemplateEncryptionConfigPtrOutputWithContext(ctx context.Context) GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigPtrOutput

type GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigResponse added in v0.32.0

type GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigResponse struct {
	// Optional. The Cloud KMS key name to use for encrypting customer core content.
	KmsKey string `pulumi:"kmsKey"`
}

Encryption settings for encrypting customer core content.

type GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigResponseOutput added in v0.32.0

type GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigResponseOutput struct{ *pulumi.OutputState }

Encryption settings for encrypting customer core content.

func (GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigResponseOutput) ElementType added in v0.32.0

func (GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigResponseOutput) KmsKey added in v0.32.0

Optional. The Cloud KMS key name to use for encrypting customer core content.

func (GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigResponseOutput) ToGoogleCloudDataprocV1WorkflowTemplateEncryptionConfigResponseOutput added in v0.32.0

func (GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigResponseOutput) ToGoogleCloudDataprocV1WorkflowTemplateEncryptionConfigResponseOutputWithContext added in v0.32.0

func (o GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigResponseOutput) ToGoogleCloudDataprocV1WorkflowTemplateEncryptionConfigResponseOutputWithContext(ctx context.Context) GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigResponseOutput

type HadoopJob

type HadoopJob struct {
	// Optional. HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
	ArchiveUris []string `pulumi:"archiveUris"`
	// Optional. The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision might occur that causes an incorrect job submission.
	Args []string `pulumi:"args"`
	// Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
	FileUris []string `pulumi:"fileUris"`
	// Optional. Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
	JarFileUris []string `pulumi:"jarFileUris"`
	// Optional. The runtime log config for job execution.
	LoggingConfig *LoggingConfig `pulumi:"loggingConfig"`
	// The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.
	MainClass *string `pulumi:"mainClass"`
	// The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
	MainJarFileUri *string `pulumi:"mainJarFileUri"`
	// Optional. A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
	Properties map[string]string `pulumi:"properties"`
}

A Dataproc job for running Apache Hadoop MapReduce (https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) jobs on Apache Hadoop YARN (https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).

type HadoopJobArgs

type HadoopJobArgs struct {
	// Optional. HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
	ArchiveUris pulumi.StringArrayInput `pulumi:"archiveUris"`
	// Optional. The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision might occur that causes an incorrect job submission.
	Args pulumi.StringArrayInput `pulumi:"args"`
	// Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
	FileUris pulumi.StringArrayInput `pulumi:"fileUris"`
	// Optional. Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
	JarFileUris pulumi.StringArrayInput `pulumi:"jarFileUris"`
	// Optional. The runtime log config for job execution.
	LoggingConfig LoggingConfigPtrInput `pulumi:"loggingConfig"`
	// The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.
	MainClass pulumi.StringPtrInput `pulumi:"mainClass"`
	// The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
	MainJarFileUri pulumi.StringPtrInput `pulumi:"mainJarFileUri"`
	// Optional. A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
	Properties pulumi.StringMapInput `pulumi:"properties"`
}

A Dataproc job for running Apache Hadoop MapReduce (https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) jobs on Apache Hadoop YARN (https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).
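
A minimal usage sketch, assuming the module import path github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1 and placeholder bucket URIs, showing how a HadoopJobArgs value can be populated and converted to the pointer form accepted wherever a HadoopJobPtrInput is expected:

package main

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1" // assumed import path
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	// Run the stock word-count example jar; the gs:// URIs are placeholders.
	job := dataproc.HadoopJobArgs{
		MainJarFileUri: pulumi.String("file:///usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar"),
		Args: pulumi.StringArray{
			pulumi.String("wordcount"),
			pulumi.String("gs://example-bucket/input/"),
			pulumi.String("gs://example-bucket/output/"),
		},
		// Properties that conflict with values set by the Dataproc API might be overwritten.
		Properties: pulumi.StringMap{
			"mapreduce.job.reduces": pulumi.String("2"),
		},
	}
	// HadoopJobArgs also satisfies HadoopJobPtrInput.
	_ = job.ToHadoopJobPtrOutput()
}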

func (HadoopJobArgs) ElementType

func (HadoopJobArgs) ElementType() reflect.Type

func (HadoopJobArgs) ToHadoopJobOutput

func (i HadoopJobArgs) ToHadoopJobOutput() HadoopJobOutput

func (HadoopJobArgs) ToHadoopJobOutputWithContext

func (i HadoopJobArgs) ToHadoopJobOutputWithContext(ctx context.Context) HadoopJobOutput

func (HadoopJobArgs) ToHadoopJobPtrOutput

func (i HadoopJobArgs) ToHadoopJobPtrOutput() HadoopJobPtrOutput

func (HadoopJobArgs) ToHadoopJobPtrOutputWithContext

func (i HadoopJobArgs) ToHadoopJobPtrOutputWithContext(ctx context.Context) HadoopJobPtrOutput

type HadoopJobInput

type HadoopJobInput interface {
	pulumi.Input

	ToHadoopJobOutput() HadoopJobOutput
	ToHadoopJobOutputWithContext(context.Context) HadoopJobOutput
}

HadoopJobInput is an input type that accepts HadoopJobArgs and HadoopJobOutput values. You can construct a concrete instance of `HadoopJobInput` via:

HadoopJobArgs{...}

type HadoopJobOutput

type HadoopJobOutput struct{ *pulumi.OutputState }

A Dataproc job for running Apache Hadoop MapReduce (https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) jobs on Apache Hadoop YARN (https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).

func (HadoopJobOutput) ArchiveUris

func (o HadoopJobOutput) ArchiveUris() pulumi.StringArrayOutput

Optional. HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.

func (HadoopJobOutput) Args

Optional. The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision might occur that causes an incorrect job submission.

func (HadoopJobOutput) ElementType

func (HadoopJobOutput) ElementType() reflect.Type

func (HadoopJobOutput) FileUris

Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.

func (HadoopJobOutput) JarFileUris

func (o HadoopJobOutput) JarFileUris() pulumi.StringArrayOutput

Optional. Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.

func (HadoopJobOutput) LoggingConfig

func (o HadoopJobOutput) LoggingConfig() LoggingConfigPtrOutput

Optional. The runtime log config for job execution.

func (HadoopJobOutput) MainClass

func (o HadoopJobOutput) MainClass() pulumi.StringPtrOutput

The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.

func (HadoopJobOutput) MainJarFileUri

func (o HadoopJobOutput) MainJarFileUri() pulumi.StringPtrOutput

The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'

func (HadoopJobOutput) Properties

func (o HadoopJobOutput) Properties() pulumi.StringMapOutput

Optional. A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.

func (HadoopJobOutput) ToHadoopJobOutput

func (o HadoopJobOutput) ToHadoopJobOutput() HadoopJobOutput

func (HadoopJobOutput) ToHadoopJobOutputWithContext

func (o HadoopJobOutput) ToHadoopJobOutputWithContext(ctx context.Context) HadoopJobOutput

func (HadoopJobOutput) ToHadoopJobPtrOutput

func (o HadoopJobOutput) ToHadoopJobPtrOutput() HadoopJobPtrOutput

func (HadoopJobOutput) ToHadoopJobPtrOutputWithContext

func (o HadoopJobOutput) ToHadoopJobPtrOutputWithContext(ctx context.Context) HadoopJobPtrOutput

type HadoopJobPtrInput

type HadoopJobPtrInput interface {
	pulumi.Input

	ToHadoopJobPtrOutput() HadoopJobPtrOutput
	ToHadoopJobPtrOutputWithContext(context.Context) HadoopJobPtrOutput
}

HadoopJobPtrInput is an input type that accepts HadoopJobArgs, HadoopJobPtr and HadoopJobPtrOutput values. You can construct a concrete instance of `HadoopJobPtrInput` via:

        HadoopJobArgs{...}

or:

        nil

func HadoopJobPtr

func HadoopJobPtr(v *HadoopJobArgs) HadoopJobPtrInput

type HadoopJobPtrOutput

type HadoopJobPtrOutput struct{ *pulumi.OutputState }

func (HadoopJobPtrOutput) ArchiveUris

Optional. HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.

func (HadoopJobPtrOutput) Args

Optional. The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision might occur that causes an incorrect job submission.

func (HadoopJobPtrOutput) Elem

func (HadoopJobPtrOutput) ElementType

func (HadoopJobPtrOutput) ElementType() reflect.Type

func (HadoopJobPtrOutput) FileUris

Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.

func (HadoopJobPtrOutput) JarFileUris

Optional. Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.

func (HadoopJobPtrOutput) LoggingConfig

func (o HadoopJobPtrOutput) LoggingConfig() LoggingConfigPtrOutput

Optional. The runtime log config for job execution.

func (HadoopJobPtrOutput) MainClass

The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.

func (HadoopJobPtrOutput) MainJarFileUri

func (o HadoopJobPtrOutput) MainJarFileUri() pulumi.StringPtrOutput

The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'

func (HadoopJobPtrOutput) Properties

func (o HadoopJobPtrOutput) Properties() pulumi.StringMapOutput

Optional. A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.

func (HadoopJobPtrOutput) ToHadoopJobPtrOutput

func (o HadoopJobPtrOutput) ToHadoopJobPtrOutput() HadoopJobPtrOutput

func (HadoopJobPtrOutput) ToHadoopJobPtrOutputWithContext

func (o HadoopJobPtrOutput) ToHadoopJobPtrOutputWithContext(ctx context.Context) HadoopJobPtrOutput

type HadoopJobResponse

type HadoopJobResponse struct {
	// Optional. HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
	ArchiveUris []string `pulumi:"archiveUris"`
	// Optional. The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision might occur that causes an incorrect job submission.
	Args []string `pulumi:"args"`
	// Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
	FileUris []string `pulumi:"fileUris"`
	// Optional. Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
	JarFileUris []string `pulumi:"jarFileUris"`
	// Optional. The runtime log config for job execution.
	LoggingConfig LoggingConfigResponse `pulumi:"loggingConfig"`
	// The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.
	MainClass string `pulumi:"mainClass"`
	// The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
	MainJarFileUri string `pulumi:"mainJarFileUri"`
	// Optional. A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
	Properties map[string]string `pulumi:"properties"`
}

A Dataproc job for running Apache Hadoop MapReduce (https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) jobs on Apache Hadoop YARN (https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).

type HadoopJobResponseOutput

type HadoopJobResponseOutput struct{ *pulumi.OutputState }

A Dataproc job for running Apache Hadoop MapReduce (https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) jobs on Apache Hadoop YARN (https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).

func (HadoopJobResponseOutput) ArchiveUris

Optional. HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.

func (HadoopJobResponseOutput) Args

Optional. The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision might occur that causes an incorrect job submission.

func (HadoopJobResponseOutput) ElementType

func (HadoopJobResponseOutput) ElementType() reflect.Type

func (HadoopJobResponseOutput) FileUris

Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.

func (HadoopJobResponseOutput) JarFileUris

Optional. Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.

func (HadoopJobResponseOutput) LoggingConfig

Optional. The runtime log config for job execution.

func (HadoopJobResponseOutput) MainClass

The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.

func (HadoopJobResponseOutput) MainJarFileUri

func (o HadoopJobResponseOutput) MainJarFileUri() pulumi.StringOutput

The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'

func (HadoopJobResponseOutput) Properties

Optional. A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.

func (HadoopJobResponseOutput) ToHadoopJobResponseOutput

func (o HadoopJobResponseOutput) ToHadoopJobResponseOutput() HadoopJobResponseOutput

func (HadoopJobResponseOutput) ToHadoopJobResponseOutputWithContext

func (o HadoopJobResponseOutput) ToHadoopJobResponseOutputWithContext(ctx context.Context) HadoopJobResponseOutput

type HiveJob

type HiveJob struct {
	// Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
	ContinueOnFailure *bool `pulumi:"continueOnFailure"`
	// Optional. HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.
	JarFileUris []string `pulumi:"jarFileUris"`
	// Optional. A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code.
	Properties map[string]string `pulumi:"properties"`
	// The HCFS URI of the script that contains Hive queries.
	QueryFileUri *string `pulumi:"queryFileUri"`
	// A list of queries.
	QueryList *QueryList `pulumi:"queryList"`
	// Optional. Mapping of query variable names to values (equivalent to the Hive command: SET name="value";).
	ScriptVariables map[string]string `pulumi:"scriptVariables"`
}

A Dataproc job for running Apache Hive (https://hive.apache.org/) queries on YARN.

type HiveJobArgs

type HiveJobArgs struct {
	// Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
	ContinueOnFailure pulumi.BoolPtrInput `pulumi:"continueOnFailure"`
	// Optional. HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.
	JarFileUris pulumi.StringArrayInput `pulumi:"jarFileUris"`
	// Optional. A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code.
	Properties pulumi.StringMapInput `pulumi:"properties"`
	// The HCFS URI of the script that contains Hive queries.
	QueryFileUri pulumi.StringPtrInput `pulumi:"queryFileUri"`
	// A list of queries.
	QueryList QueryListPtrInput `pulumi:"queryList"`
	// Optional. Mapping of query variable names to values (equivalent to the Hive command: SET name="value";).
	ScriptVariables pulumi.StringMapInput `pulumi:"scriptVariables"`
}

A Dataproc job for running Apache Hive (https://hive.apache.org/) queries on YARN.
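
A minimal usage sketch under the same assumptions as the HadoopJobArgs example above (assumed import path, placeholder URIs), running a Hive script from Cloud Storage with one script variable:

package main

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1" // assumed import path
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	hive := dataproc.HiveJobArgs{
		// Placeholder script URI; the variable is equivalent to SET run_date="2023-11-29"; in the script.
		QueryFileUri:      pulumi.String("gs://example-bucket/queries/report.hql"),
		ContinueOnFailure: pulumi.Bool(false),
		ScriptVariables: pulumi.StringMap{
			"run_date": pulumi.String("2023-11-29"),
		},
	}
	// HiveJobArgs also satisfies HiveJobPtrInput.
	_ = hive.ToHiveJobPtrOutput()
}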

func (HiveJobArgs) ElementType

func (HiveJobArgs) ElementType() reflect.Type

func (HiveJobArgs) ToHiveJobOutput

func (i HiveJobArgs) ToHiveJobOutput() HiveJobOutput

func (HiveJobArgs) ToHiveJobOutputWithContext

func (i HiveJobArgs) ToHiveJobOutputWithContext(ctx context.Context) HiveJobOutput

func (HiveJobArgs) ToHiveJobPtrOutput

func (i HiveJobArgs) ToHiveJobPtrOutput() HiveJobPtrOutput

func (HiveJobArgs) ToHiveJobPtrOutputWithContext

func (i HiveJobArgs) ToHiveJobPtrOutputWithContext(ctx context.Context) HiveJobPtrOutput

type HiveJobInput

type HiveJobInput interface {
	pulumi.Input

	ToHiveJobOutput() HiveJobOutput
	ToHiveJobOutputWithContext(context.Context) HiveJobOutput
}

HiveJobInput is an input type that accepts HiveJobArgs and HiveJobOutput values. You can construct a concrete instance of `HiveJobInput` via:

HiveJobArgs{...}

type HiveJobOutput

type HiveJobOutput struct{ *pulumi.OutputState }

A Dataproc job for running Apache Hive (https://hive.apache.org/) queries on YARN.

func (HiveJobOutput) ContinueOnFailure

func (o HiveJobOutput) ContinueOnFailure() pulumi.BoolPtrOutput

Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.

func (HiveJobOutput) ElementType

func (HiveJobOutput) ElementType() reflect.Type

func (HiveJobOutput) JarFileUris

func (o HiveJobOutput) JarFileUris() pulumi.StringArrayOutput

Optional. HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.

func (HiveJobOutput) Properties

func (o HiveJobOutput) Properties() pulumi.StringMapOutput

Optional. A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code.

func (HiveJobOutput) QueryFileUri

func (o HiveJobOutput) QueryFileUri() pulumi.StringPtrOutput

The HCFS URI of the script that contains Hive queries.

func (HiveJobOutput) QueryList

func (o HiveJobOutput) QueryList() QueryListPtrOutput

A list of queries.

func (HiveJobOutput) ScriptVariables

func (o HiveJobOutput) ScriptVariables() pulumi.StringMapOutput

Optional. Mapping of query variable names to values (equivalent to the Hive command: SET name="value";).

func (HiveJobOutput) ToHiveJobOutput

func (o HiveJobOutput) ToHiveJobOutput() HiveJobOutput

func (HiveJobOutput) ToHiveJobOutputWithContext

func (o HiveJobOutput) ToHiveJobOutputWithContext(ctx context.Context) HiveJobOutput

func (HiveJobOutput) ToHiveJobPtrOutput

func (o HiveJobOutput) ToHiveJobPtrOutput() HiveJobPtrOutput

func (HiveJobOutput) ToHiveJobPtrOutputWithContext

func (o HiveJobOutput) ToHiveJobPtrOutputWithContext(ctx context.Context) HiveJobPtrOutput

type HiveJobPtrInput

type HiveJobPtrInput interface {
	pulumi.Input

	ToHiveJobPtrOutput() HiveJobPtrOutput
	ToHiveJobPtrOutputWithContext(context.Context) HiveJobPtrOutput
}

HiveJobPtrInput is an input type that accepts HiveJobArgs, HiveJobPtr and HiveJobPtrOutput values. You can construct a concrete instance of `HiveJobPtrInput` via:

        HiveJobArgs{...}

or:

        nil

func HiveJobPtr

func HiveJobPtr(v *HiveJobArgs) HiveJobPtrInput

type HiveJobPtrOutput

type HiveJobPtrOutput struct{ *pulumi.OutputState }

func (HiveJobPtrOutput) ContinueOnFailure

func (o HiveJobPtrOutput) ContinueOnFailure() pulumi.BoolPtrOutput

Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.

func (HiveJobPtrOutput) Elem

func (HiveJobPtrOutput) ElementType

func (HiveJobPtrOutput) ElementType() reflect.Type

func (HiveJobPtrOutput) JarFileUris

func (o HiveJobPtrOutput) JarFileUris() pulumi.StringArrayOutput

Optional. HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.

func (HiveJobPtrOutput) Properties

func (o HiveJobPtrOutput) Properties() pulumi.StringMapOutput

Optional. A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code.

func (HiveJobPtrOutput) QueryFileUri

func (o HiveJobPtrOutput) QueryFileUri() pulumi.StringPtrOutput

The HCFS URI of the script that contains Hive queries.

func (HiveJobPtrOutput) QueryList

func (o HiveJobPtrOutput) QueryList() QueryListPtrOutput

A list of queries.

func (HiveJobPtrOutput) ScriptVariables

func (o HiveJobPtrOutput) ScriptVariables() pulumi.StringMapOutput

Optional. Mapping of query variable names to values (equivalent to the Hive command: SET name="value";).

func (HiveJobPtrOutput) ToHiveJobPtrOutput

func (o HiveJobPtrOutput) ToHiveJobPtrOutput() HiveJobPtrOutput

func (HiveJobPtrOutput) ToHiveJobPtrOutputWithContext

func (o HiveJobPtrOutput) ToHiveJobPtrOutputWithContext(ctx context.Context) HiveJobPtrOutput

type HiveJobResponse

type HiveJobResponse struct {
	// Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
	ContinueOnFailure bool `pulumi:"continueOnFailure"`
	// Optional. HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.
	JarFileUris []string `pulumi:"jarFileUris"`
	// Optional. A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code.
	Properties map[string]string `pulumi:"properties"`
	// The HCFS URI of the script that contains Hive queries.
	QueryFileUri string `pulumi:"queryFileUri"`
	// A list of queries.
	QueryList QueryListResponse `pulumi:"queryList"`
	// Optional. Mapping of query variable names to values (equivalent to the Hive command: SET name="value";).
	ScriptVariables map[string]string `pulumi:"scriptVariables"`
}

A Dataproc job for running Apache Hive (https://hive.apache.org/) queries on YARN.

type HiveJobResponseOutput

type HiveJobResponseOutput struct{ *pulumi.OutputState }

A Dataproc job for running Apache Hive (https://hive.apache.org/) queries on YARN.

func (HiveJobResponseOutput) ContinueOnFailure

func (o HiveJobResponseOutput) ContinueOnFailure() pulumi.BoolOutput

Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.

func (HiveJobResponseOutput) ElementType

func (HiveJobResponseOutput) ElementType() reflect.Type

func (HiveJobResponseOutput) JarFileUris

Optional. HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.

func (HiveJobResponseOutput) Properties

Optional. A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code.

func (HiveJobResponseOutput) QueryFileUri

func (o HiveJobResponseOutput) QueryFileUri() pulumi.StringOutput

The HCFS URI of the script that contains Hive queries.

func (HiveJobResponseOutput) QueryList

A list of queries.

func (HiveJobResponseOutput) ScriptVariables

func (o HiveJobResponseOutput) ScriptVariables() pulumi.StringMapOutput

Optional. Mapping of query variable names to values (equivalent to the Hive command: SET name="value";).

func (HiveJobResponseOutput) ToHiveJobResponseOutput

func (o HiveJobResponseOutput) ToHiveJobResponseOutput() HiveJobResponseOutput

func (HiveJobResponseOutput) ToHiveJobResponseOutputWithContext

func (o HiveJobResponseOutput) ToHiveJobResponseOutputWithContext(ctx context.Context) HiveJobResponseOutput

type IdentityConfig

type IdentityConfig struct {
	// Map of user to service account.
	UserServiceAccountMapping map[string]string `pulumi:"userServiceAccountMapping"`
}

Identity related configuration, including service account based secure multi-tenancy user mappings.

type IdentityConfigArgs

type IdentityConfigArgs struct {
	// Map of user to service account.
	UserServiceAccountMapping pulumi.StringMapInput `pulumi:"userServiceAccountMapping"`
}

Identity related configuration, including service account based secure multi-tenancy user mappings.
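
A minimal usage sketch (same assumed import path; the user and service account principals are placeholders) mapping cluster users to the service accounts they act as:

package main

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1" // assumed import path
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	identity := dataproc.IdentityConfigArgs{
		// Placeholder principals; each user is mapped to the service account it may act as.
		UserServiceAccountMapping: pulumi.StringMap{
			"alice@example.com": pulumi.String("sa-alice@example-project.iam.gserviceaccount.com"),
			"bob@example.com":   pulumi.String("sa-bob@example-project.iam.gserviceaccount.com"),
		},
	}
	_ = identity.ToIdentityConfigPtrOutput()
}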

func (IdentityConfigArgs) ElementType

func (IdentityConfigArgs) ElementType() reflect.Type

func (IdentityConfigArgs) ToIdentityConfigOutput

func (i IdentityConfigArgs) ToIdentityConfigOutput() IdentityConfigOutput

func (IdentityConfigArgs) ToIdentityConfigOutputWithContext

func (i IdentityConfigArgs) ToIdentityConfigOutputWithContext(ctx context.Context) IdentityConfigOutput

func (IdentityConfigArgs) ToIdentityConfigPtrOutput

func (i IdentityConfigArgs) ToIdentityConfigPtrOutput() IdentityConfigPtrOutput

func (IdentityConfigArgs) ToIdentityConfigPtrOutputWithContext

func (i IdentityConfigArgs) ToIdentityConfigPtrOutputWithContext(ctx context.Context) IdentityConfigPtrOutput

type IdentityConfigInput

type IdentityConfigInput interface {
	pulumi.Input

	ToIdentityConfigOutput() IdentityConfigOutput
	ToIdentityConfigOutputWithContext(context.Context) IdentityConfigOutput
}

IdentityConfigInput is an input type that accepts IdentityConfigArgs and IdentityConfigOutput values. You can construct a concrete instance of `IdentityConfigInput` via:

IdentityConfigArgs{...}

type IdentityConfigOutput

type IdentityConfigOutput struct{ *pulumi.OutputState }

Identity related configuration, including service account based secure multi-tenancy user mappings.

func (IdentityConfigOutput) ElementType

func (IdentityConfigOutput) ElementType() reflect.Type

func (IdentityConfigOutput) ToIdentityConfigOutput

func (o IdentityConfigOutput) ToIdentityConfigOutput() IdentityConfigOutput

func (IdentityConfigOutput) ToIdentityConfigOutputWithContext

func (o IdentityConfigOutput) ToIdentityConfigOutputWithContext(ctx context.Context) IdentityConfigOutput

func (IdentityConfigOutput) ToIdentityConfigPtrOutput

func (o IdentityConfigOutput) ToIdentityConfigPtrOutput() IdentityConfigPtrOutput

func (IdentityConfigOutput) ToIdentityConfigPtrOutputWithContext

func (o IdentityConfigOutput) ToIdentityConfigPtrOutputWithContext(ctx context.Context) IdentityConfigPtrOutput

func (IdentityConfigOutput) UserServiceAccountMapping

func (o IdentityConfigOutput) UserServiceAccountMapping() pulumi.StringMapOutput

Map of user to service account.

type IdentityConfigPtrInput

type IdentityConfigPtrInput interface {
	pulumi.Input

	ToIdentityConfigPtrOutput() IdentityConfigPtrOutput
	ToIdentityConfigPtrOutputWithContext(context.Context) IdentityConfigPtrOutput
}

IdentityConfigPtrInput is an input type that accepts IdentityConfigArgs, IdentityConfigPtr and IdentityConfigPtrOutput values. You can construct a concrete instance of `IdentityConfigPtrInput` via:

        IdentityConfigArgs{...}

or:

        nil

type IdentityConfigPtrOutput

type IdentityConfigPtrOutput struct{ *pulumi.OutputState }

func (IdentityConfigPtrOutput) Elem

func (IdentityConfigPtrOutput) ElementType

func (IdentityConfigPtrOutput) ElementType() reflect.Type

func (IdentityConfigPtrOutput) ToIdentityConfigPtrOutput

func (o IdentityConfigPtrOutput) ToIdentityConfigPtrOutput() IdentityConfigPtrOutput

func (IdentityConfigPtrOutput) ToIdentityConfigPtrOutputWithContext

func (o IdentityConfigPtrOutput) ToIdentityConfigPtrOutputWithContext(ctx context.Context) IdentityConfigPtrOutput

func (IdentityConfigPtrOutput) UserServiceAccountMapping

func (o IdentityConfigPtrOutput) UserServiceAccountMapping() pulumi.StringMapOutput

Map of user to service account.

type IdentityConfigResponse

type IdentityConfigResponse struct {
	// Map of user to service account.
	UserServiceAccountMapping map[string]string `pulumi:"userServiceAccountMapping"`
}

Identity related configuration, including service account based secure multi-tenancy user mappings.

type IdentityConfigResponseOutput

type IdentityConfigResponseOutput struct{ *pulumi.OutputState }

Identity related configuration, including service account based secure multi-tenancy user mappings.

func (IdentityConfigResponseOutput) ElementType

func (IdentityConfigResponseOutput) ToIdentityConfigResponseOutput

func (o IdentityConfigResponseOutput) ToIdentityConfigResponseOutput() IdentityConfigResponseOutput

func (IdentityConfigResponseOutput) ToIdentityConfigResponseOutputWithContext

func (o IdentityConfigResponseOutput) ToIdentityConfigResponseOutputWithContext(ctx context.Context) IdentityConfigResponseOutput

func (IdentityConfigResponseOutput) UserServiceAccountMapping

func (o IdentityConfigResponseOutput) UserServiceAccountMapping() pulumi.StringMapOutput

Map of user to service account.

type InstanceFlexibilityPolicy added in v0.32.0

type InstanceFlexibilityPolicy struct {
	// Optional. List of instance selection options that the group will use when creating new VMs.
	InstanceSelectionList []InstanceSelection `pulumi:"instanceSelectionList"`
}

Instance flexibility Policy allowing a mixture of VM shapes and provisioning models.

type InstanceFlexibilityPolicyArgs added in v0.32.0

type InstanceFlexibilityPolicyArgs struct {
	// Optional. List of instance selection options that the group will use when creating new VMs.
	InstanceSelectionList InstanceSelectionArrayInput `pulumi:"instanceSelectionList"`
}

Instance flexibility Policy allowing a mixture of VM shapes and provisioning models.
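
A minimal usage sketch (same assumed import path). The InstanceSelectionArray element type and the MachineTypes/Rank fields on InstanceSelectionArgs are assumptions that mirror the underlying InstanceSelection API message; confirm them against the InstanceSelection types defined elsewhere in this package:

package main

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1" // assumed import path
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	flex := dataproc.InstanceFlexibilityPolicyArgs{
		// Assumed field names on InstanceSelectionArgs: MachineTypes and Rank.
		InstanceSelectionList: dataproc.InstanceSelectionArray{
			dataproc.InstanceSelectionArgs{
				MachineTypes: pulumi.StringArray{
					pulumi.String("n2-standard-8"),
					pulumi.String("n2d-standard-8"),
				},
				Rank: pulumi.Int(1),
			},
		},
	}
	_ = flex.ToInstanceFlexibilityPolicyPtrOutput()
}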

func (InstanceFlexibilityPolicyArgs) ElementType added in v0.32.0

func (InstanceFlexibilityPolicyArgs) ToInstanceFlexibilityPolicyOutput added in v0.32.0

func (i InstanceFlexibilityPolicyArgs) ToInstanceFlexibilityPolicyOutput() InstanceFlexibilityPolicyOutput

func (InstanceFlexibilityPolicyArgs) ToInstanceFlexibilityPolicyOutputWithContext added in v0.32.0

func (i InstanceFlexibilityPolicyArgs) ToInstanceFlexibilityPolicyOutputWithContext(ctx context.Context) InstanceFlexibilityPolicyOutput

func (InstanceFlexibilityPolicyArgs) ToInstanceFlexibilityPolicyPtrOutput added in v0.32.0

func (i InstanceFlexibilityPolicyArgs) ToInstanceFlexibilityPolicyPtrOutput() InstanceFlexibilityPolicyPtrOutput

func (InstanceFlexibilityPolicyArgs) ToInstanceFlexibilityPolicyPtrOutputWithContext added in v0.32.0

func (i InstanceFlexibilityPolicyArgs) ToInstanceFlexibilityPolicyPtrOutputWithContext(ctx context.Context) InstanceFlexibilityPolicyPtrOutput

type InstanceFlexibilityPolicyInput added in v0.32.0

type InstanceFlexibilityPolicyInput interface {
	pulumi.Input

	ToInstanceFlexibilityPolicyOutput() InstanceFlexibilityPolicyOutput
	ToInstanceFlexibilityPolicyOutputWithContext(context.Context) InstanceFlexibilityPolicyOutput
}

InstanceFlexibilityPolicyInput is an input type that accepts InstanceFlexibilityPolicyArgs and InstanceFlexibilityPolicyOutput values. You can construct a concrete instance of `InstanceFlexibilityPolicyInput` via:

InstanceFlexibilityPolicyArgs{...}

type InstanceFlexibilityPolicyOutput added in v0.32.0

type InstanceFlexibilityPolicyOutput struct{ *pulumi.OutputState }

Instance flexibility Policy allowing a mixture of VM shapes and provisioning models.

func (InstanceFlexibilityPolicyOutput) ElementType added in v0.32.0

func (InstanceFlexibilityPolicyOutput) InstanceSelectionList added in v0.32.0

Optional. List of instance selection options that the group will use when creating new VMs.

func (InstanceFlexibilityPolicyOutput) ToInstanceFlexibilityPolicyOutput added in v0.32.0

func (o InstanceFlexibilityPolicyOutput) ToInstanceFlexibilityPolicyOutput() InstanceFlexibilityPolicyOutput

func (InstanceFlexibilityPolicyOutput) ToInstanceFlexibilityPolicyOutputWithContext added in v0.32.0

func (o InstanceFlexibilityPolicyOutput) ToInstanceFlexibilityPolicyOutputWithContext(ctx context.Context) InstanceFlexibilityPolicyOutput

func (InstanceFlexibilityPolicyOutput) ToInstanceFlexibilityPolicyPtrOutput added in v0.32.0

func (o InstanceFlexibilityPolicyOutput) ToInstanceFlexibilityPolicyPtrOutput() InstanceFlexibilityPolicyPtrOutput

func (InstanceFlexibilityPolicyOutput) ToInstanceFlexibilityPolicyPtrOutputWithContext added in v0.32.0

func (o InstanceFlexibilityPolicyOutput) ToInstanceFlexibilityPolicyPtrOutputWithContext(ctx context.Context) InstanceFlexibilityPolicyPtrOutput

type InstanceFlexibilityPolicyPtrInput added in v0.32.0

type InstanceFlexibilityPolicyPtrInput interface {
	pulumi.Input

	ToInstanceFlexibilityPolicyPtrOutput() InstanceFlexibilityPolicyPtrOutput
	ToInstanceFlexibilityPolicyPtrOutputWithContext(context.Context) InstanceFlexibilityPolicyPtrOutput
}

InstanceFlexibilityPolicyPtrInput is an input type that accepts InstanceFlexibilityPolicyArgs, InstanceFlexibilityPolicyPtr and InstanceFlexibilityPolicyPtrOutput values. You can construct a concrete instance of `InstanceFlexibilityPolicyPtrInput` via:

        InstanceFlexibilityPolicyArgs{...}

or:

        nil

func InstanceFlexibilityPolicyPtr added in v0.32.0

type InstanceFlexibilityPolicyPtrOutput added in v0.32.0

type InstanceFlexibilityPolicyPtrOutput struct{ *pulumi.OutputState }

func (InstanceFlexibilityPolicyPtrOutput) Elem added in v0.32.0

func (InstanceFlexibilityPolicyPtrOutput) ElementType added in v0.32.0

func (InstanceFlexibilityPolicyPtrOutput) InstanceSelectionList added in v0.32.0

Optional. List of instance selection options that the group will use when creating new VMs.

func (InstanceFlexibilityPolicyPtrOutput) ToInstanceFlexibilityPolicyPtrOutput added in v0.32.0

func (o InstanceFlexibilityPolicyPtrOutput) ToInstanceFlexibilityPolicyPtrOutput() InstanceFlexibilityPolicyPtrOutput

func (InstanceFlexibilityPolicyPtrOutput) ToInstanceFlexibilityPolicyPtrOutputWithContext added in v0.32.0

func (o InstanceFlexibilityPolicyPtrOutput) ToInstanceFlexibilityPolicyPtrOutputWithContext(ctx context.Context) InstanceFlexibilityPolicyPtrOutput

type InstanceFlexibilityPolicyResponse added in v0.32.0

type InstanceFlexibilityPolicyResponse struct {
	// Optional. List of instance selection options that the group will use when creating new VMs.
	InstanceSelectionList []InstanceSelectionResponse `pulumi:"instanceSelectionList"`
	// A list of instance selection results in the group.
	InstanceSelectionResults []InstanceSelectionResultResponse `pulumi:"instanceSelectionResults"`
}

Instance flexibility Policy allowing a mixture of VM shapes and provisioning models.

type InstanceFlexibilityPolicyResponseOutput added in v0.32.0

type InstanceFlexibilityPolicyResponseOutput struct{ *pulumi.OutputState }

Instance flexibility Policy allowing a mixture of VM shapes and provisioning models.

func (InstanceFlexibilityPolicyResponseOutput) ElementType added in v0.32.0

func (InstanceFlexibilityPolicyResponseOutput) InstanceSelectionList added in v0.32.0

Optional. List of instance selection options that the group will use when creating new VMs.

func (InstanceFlexibilityPolicyResponseOutput) InstanceSelectionResults added in v0.32.0

A list of instance selection results in the group.

func (InstanceFlexibilityPolicyResponseOutput) ToInstanceFlexibilityPolicyResponseOutput added in v0.32.0

func (o InstanceFlexibilityPolicyResponseOutput) ToInstanceFlexibilityPolicyResponseOutput() InstanceFlexibilityPolicyResponseOutput

func (InstanceFlexibilityPolicyResponseOutput) ToInstanceFlexibilityPolicyResponseOutputWithContext added in v0.32.0

func (o InstanceFlexibilityPolicyResponseOutput) ToInstanceFlexibilityPolicyResponseOutputWithContext(ctx context.Context) InstanceFlexibilityPolicyResponseOutput

type InstanceGroupAutoscalingPolicyConfig

type InstanceGroupAutoscalingPolicyConfig struct {
	// Maximum number of instances for this group. Required for primary workers. Note that by default, clusters will not use secondary workers. Required for secondary workers if the minimum secondary instances is set. Primary workers - Bounds: [min_instances, ). Secondary workers - Bounds: [min_instances, ). Default: 0.
	MaxInstances int `pulumi:"maxInstances"`
	// Optional. Minimum number of instances for this group. Primary workers - Bounds: [2, max_instances]. Default: 2. Secondary workers - Bounds: [0, max_instances]. Default: 0.
	MinInstances *int `pulumi:"minInstances"`
	// Optional. Weight for the instance group, which is used to determine the fraction of total workers in the cluster from this instance group. For example, if primary workers have weight 2, and secondary workers have weight 1, the cluster will have approximately 2 primary workers for each secondary worker. The cluster may not reach the specified balance if constrained by min/max bounds or other autoscaling settings. For example, if max_instances for secondary workers is 0, then only primary workers will be added. The cluster can also be out of balance when created. If weight is not set on any instance group, the cluster will default to equal weight for all groups: the cluster will attempt to maintain an equal number of workers in each group within the configured size bounds for each group. If weight is set for one group only, the cluster will default to zero weight on the unset group. For example, if weight is set only on primary workers, the cluster will use primary workers only and no secondary workers.
	Weight *int `pulumi:"weight"`
}

Configuration for the size bounds of an instance group, including its proportional size to other groups.

type InstanceGroupAutoscalingPolicyConfigArgs

type InstanceGroupAutoscalingPolicyConfigArgs struct {
	// Maximum number of instances for this group. Required for primary workers. Note that by default, clusters will not use secondary workers. Required for secondary workers if the minimum secondary instances is set. Primary workers - Bounds: [min_instances, ). Secondary workers - Bounds: [min_instances, ). Default: 0.
	MaxInstances pulumi.IntInput `pulumi:"maxInstances"`
	// Optional. Minimum number of instances for this group. Primary workers - Bounds: [2, max_instances]. Default: 2. Secondary workers - Bounds: [0, max_instances]. Default: 0.
	MinInstances pulumi.IntPtrInput `pulumi:"minInstances"`
	// Optional. Weight for the instance group, which is used to determine the fraction of total workers in the cluster from this instance group. For example, if primary workers have weight 2, and secondary workers have weight 1, the cluster will have approximately 2 primary workers for each secondary worker. The cluster may not reach the specified balance if constrained by min/max bounds or other autoscaling settings. For example, if max_instances for secondary workers is 0, then only primary workers will be added. The cluster can also be out of balance when created. If weight is not set on any instance group, the cluster will default to equal weight for all groups: the cluster will attempt to maintain an equal number of workers in each group within the configured size bounds for each group. If weight is set for one group only, the cluster will default to zero weight on the unset group. For example, if weight is set only on primary workers, the cluster will use primary workers only and no secondary workers.
	Weight pulumi.IntPtrInput `pulumi:"weight"`
}

Configuration for the size bounds of an instance group, including its proportional size to other groups.
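
A minimal usage sketch (same assumed import path) of two group configs as they might appear in an autoscaling policy: primary workers bounded at [2, 10], secondary workers at [0, 20], with secondaries weighted twice as heavily so the autoscaler targets roughly two secondary workers per primary worker:

package main

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1" // assumed import path
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	primary := dataproc.InstanceGroupAutoscalingPolicyConfigArgs{
		MinInstances: pulumi.Int(2),
		MaxInstances: pulumi.Int(10),
		Weight:       pulumi.Int(1),
	}
	secondary := dataproc.InstanceGroupAutoscalingPolicyConfigArgs{
		MinInstances: pulumi.Int(0),
		MaxInstances: pulumi.Int(20),
		Weight:       pulumi.Int(2), // roughly two secondary workers per primary worker
	}
	_ = primary.ToInstanceGroupAutoscalingPolicyConfigPtrOutput()
	_ = secondary.ToInstanceGroupAutoscalingPolicyConfigPtrOutput()
}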

func (InstanceGroupAutoscalingPolicyConfigArgs) ElementType

func (InstanceGroupAutoscalingPolicyConfigArgs) ToInstanceGroupAutoscalingPolicyConfigOutput

func (i InstanceGroupAutoscalingPolicyConfigArgs) ToInstanceGroupAutoscalingPolicyConfigOutput() InstanceGroupAutoscalingPolicyConfigOutput

func (InstanceGroupAutoscalingPolicyConfigArgs) ToInstanceGroupAutoscalingPolicyConfigOutputWithContext

func (i InstanceGroupAutoscalingPolicyConfigArgs) ToInstanceGroupAutoscalingPolicyConfigOutputWithContext(ctx context.Context) InstanceGroupAutoscalingPolicyConfigOutput

func (InstanceGroupAutoscalingPolicyConfigArgs) ToInstanceGroupAutoscalingPolicyConfigPtrOutput

func (i InstanceGroupAutoscalingPolicyConfigArgs) ToInstanceGroupAutoscalingPolicyConfigPtrOutput() InstanceGroupAutoscalingPolicyConfigPtrOutput

func (InstanceGroupAutoscalingPolicyConfigArgs) ToInstanceGroupAutoscalingPolicyConfigPtrOutputWithContext

func (i InstanceGroupAutoscalingPolicyConfigArgs) ToInstanceGroupAutoscalingPolicyConfigPtrOutputWithContext(ctx context.Context) InstanceGroupAutoscalingPolicyConfigPtrOutput

type InstanceGroupAutoscalingPolicyConfigInput

type InstanceGroupAutoscalingPolicyConfigInput interface {
	pulumi.Input

	ToInstanceGroupAutoscalingPolicyConfigOutput() InstanceGroupAutoscalingPolicyConfigOutput
	ToInstanceGroupAutoscalingPolicyConfigOutputWithContext(context.Context) InstanceGroupAutoscalingPolicyConfigOutput
}

InstanceGroupAutoscalingPolicyConfigInput is an input type that accepts InstanceGroupAutoscalingPolicyConfigArgs and InstanceGroupAutoscalingPolicyConfigOutput values. You can construct a concrete instance of `InstanceGroupAutoscalingPolicyConfigInput` via:

InstanceGroupAutoscalingPolicyConfigArgs{...}

type InstanceGroupAutoscalingPolicyConfigOutput

type InstanceGroupAutoscalingPolicyConfigOutput struct{ *pulumi.OutputState }

Configuration for the size bounds of an instance group, including its proportional size to other groups.

func (InstanceGroupAutoscalingPolicyConfigOutput) ElementType

func (InstanceGroupAutoscalingPolicyConfigOutput) MaxInstances

Maximum number of instances for this group. Required for primary workers. Note that by default, clusters will not use secondary workers. Required for secondary workers if the minimum secondary instances is set. Primary workers - Bounds: [min_instances, ). Secondary workers - Bounds: [min_instances, ). Default: 0.

func (InstanceGroupAutoscalingPolicyConfigOutput) MinInstances

Optional. Minimum number of instances for this group. Primary workers - Bounds: [2, max_instances]. Default: 2. Secondary workers - Bounds: [0, max_instances]. Default: 0.

func (InstanceGroupAutoscalingPolicyConfigOutput) ToInstanceGroupAutoscalingPolicyConfigOutput

func (o InstanceGroupAutoscalingPolicyConfigOutput) ToInstanceGroupAutoscalingPolicyConfigOutput() InstanceGroupAutoscalingPolicyConfigOutput

func (InstanceGroupAutoscalingPolicyConfigOutput) ToInstanceGroupAutoscalingPolicyConfigOutputWithContext

func (o InstanceGroupAutoscalingPolicyConfigOutput) ToInstanceGroupAutoscalingPolicyConfigOutputWithContext(ctx context.Context) InstanceGroupAutoscalingPolicyConfigOutput

func (InstanceGroupAutoscalingPolicyConfigOutput) ToInstanceGroupAutoscalingPolicyConfigPtrOutput

func (o InstanceGroupAutoscalingPolicyConfigOutput) ToInstanceGroupAutoscalingPolicyConfigPtrOutput() InstanceGroupAutoscalingPolicyConfigPtrOutput

func (InstanceGroupAutoscalingPolicyConfigOutput) ToInstanceGroupAutoscalingPolicyConfigPtrOutputWithContext

func (o InstanceGroupAutoscalingPolicyConfigOutput) ToInstanceGroupAutoscalingPolicyConfigPtrOutputWithContext(ctx context.Context) InstanceGroupAutoscalingPolicyConfigPtrOutput

func (InstanceGroupAutoscalingPolicyConfigOutput) Weight

Optional. Weight for the instance group, which is used to determine the fraction of total workers in the cluster from this instance group. For example, if primary workers have weight 2, and secondary workers have weight 1, the cluster will have approximately 2 primary workers for each secondary worker. The cluster may not reach the specified balance if constrained by min/max bounds or other autoscaling settings. For example, if max_instances for secondary workers is 0, then only primary workers will be added. The cluster can also be out of balance when created. If weight is not set on any instance group, the cluster will default to equal weight for all groups: the cluster will attempt to maintain an equal number of workers in each group within the configured size bounds for each group. If weight is set for one group only, the cluster will default to zero weight on the unset group. For example, if weight is set only on primary workers, the cluster will use primary workers only and no secondary workers.

type InstanceGroupAutoscalingPolicyConfigPtrInput

type InstanceGroupAutoscalingPolicyConfigPtrInput interface {
	pulumi.Input

	ToInstanceGroupAutoscalingPolicyConfigPtrOutput() InstanceGroupAutoscalingPolicyConfigPtrOutput
	ToInstanceGroupAutoscalingPolicyConfigPtrOutputWithContext(context.Context) InstanceGroupAutoscalingPolicyConfigPtrOutput
}

InstanceGroupAutoscalingPolicyConfigPtrInput is an input type that accepts InstanceGroupAutoscalingPolicyConfigArgs, InstanceGroupAutoscalingPolicyConfigPtr and InstanceGroupAutoscalingPolicyConfigPtrOutput values. You can construct a concrete instance of `InstanceGroupAutoscalingPolicyConfigPtrInput` via:

        InstanceGroupAutoscalingPolicyConfigArgs{...}

or:

        nil

type InstanceGroupAutoscalingPolicyConfigPtrOutput

type InstanceGroupAutoscalingPolicyConfigPtrOutput struct{ *pulumi.OutputState }

func (InstanceGroupAutoscalingPolicyConfigPtrOutput) Elem

func (InstanceGroupAutoscalingPolicyConfigPtrOutput) ElementType

func (InstanceGroupAutoscalingPolicyConfigPtrOutput) MaxInstances

Maximum number of instances for this group. Required for primary workers. Note that by default, clusters will not use secondary workers. Required for secondary workers if the minimum secondary instances is set. Primary workers - Bounds: [min_instances, ). Secondary workers - Bounds: [min_instances, ). Default: 0.

func (InstanceGroupAutoscalingPolicyConfigPtrOutput) MinInstances

Optional. Minimum number of instances for this group. Primary workers - Bounds: [2, max_instances]. Default: 2. Secondary workers - Bounds: [0, max_instances]. Default: 0.

func (InstanceGroupAutoscalingPolicyConfigPtrOutput) ToInstanceGroupAutoscalingPolicyConfigPtrOutput

func (o InstanceGroupAutoscalingPolicyConfigPtrOutput) ToInstanceGroupAutoscalingPolicyConfigPtrOutput() InstanceGroupAutoscalingPolicyConfigPtrOutput

func (InstanceGroupAutoscalingPolicyConfigPtrOutput) ToInstanceGroupAutoscalingPolicyConfigPtrOutputWithContext

func (o InstanceGroupAutoscalingPolicyConfigPtrOutput) ToInstanceGroupAutoscalingPolicyConfigPtrOutputWithContext(ctx context.Context) InstanceGroupAutoscalingPolicyConfigPtrOutput

func (InstanceGroupAutoscalingPolicyConfigPtrOutput) Weight

Optional. Weight for the instance group, which is used to determine the fraction of total workers in the cluster from this instance group. For example, if primary workers have weight 2, and secondary workers have weight 1, the cluster will have approximately 2 primary workers for each secondary worker. The cluster may not reach the specified balance if constrained by min/max bounds or other autoscaling settings. For example, if max_instances for secondary workers is 0, then only primary workers will be added. The cluster can also be out of balance when created. If weight is not set on any instance group, the cluster will default to equal weight for all groups: the cluster will attempt to maintain an equal number of workers in each group within the configured size bounds for each group. If weight is set for one group only, the cluster will default to zero weight on the unset group. For example, if weight is set only on primary workers, the cluster will use primary workers only and no secondary workers.

type InstanceGroupAutoscalingPolicyConfigResponse

type InstanceGroupAutoscalingPolicyConfigResponse struct {
	// Maximum number of instances for this group. Required for primary workers. Note that by default, clusters will not use secondary workers. Required for secondary workers if the minimum secondary instances is set. Primary workers - Bounds: [min_instances, ). Secondary workers - Bounds: [min_instances, ). Default: 0.
	MaxInstances int `pulumi:"maxInstances"`
	// Optional. Minimum number of instances for this group. Primary workers - Bounds: [2, max_instances]. Default: 2. Secondary workers - Bounds: [0, max_instances]. Default: 0.
	MinInstances int `pulumi:"minInstances"`
	// Optional. Weight for the instance group, which is used to determine the fraction of total workers in the cluster from this instance group. For example, if primary workers have weight 2, and secondary workers have weight 1, the cluster will have approximately 2 primary workers for each secondary worker. The cluster may not reach the specified balance if constrained by min/max bounds or other autoscaling settings. For example, if max_instances for secondary workers is 0, then only primary workers will be added. The cluster can also be out of balance when created. If weight is not set on any instance group, the cluster will default to equal weight for all groups: the cluster will attempt to maintain an equal number of workers in each group within the configured size bounds for each group. If weight is set for one group only, the cluster will default to zero weight on the unset group. For example, if weight is set only on primary workers, the cluster will use primary workers only and no secondary workers.
	Weight int `pulumi:"weight"`
}

Configuration for the size bounds of an instance group, including its proportional size to other groups.

type InstanceGroupAutoscalingPolicyConfigResponseOutput

type InstanceGroupAutoscalingPolicyConfigResponseOutput struct{ *pulumi.OutputState }

Configuration for the size bounds of an instance group, including its proportional size to other groups.

func (InstanceGroupAutoscalingPolicyConfigResponseOutput) ElementType

func (InstanceGroupAutoscalingPolicyConfigResponseOutput) MaxInstances

Maximum number of instances for this group. Required for primary workers. Note that by default, clusters will not use secondary workers. Required for secondary workers if the minimum secondary instances is set. Primary workers - Bounds: [min_instances, ). Secondary workers - Bounds: [min_instances, ). Default: 0.

func (InstanceGroupAutoscalingPolicyConfigResponseOutput) MinInstances

Optional. Minimum number of instances for this group. Primary workers - Bounds: [2, max_instances]. Default: 2. Secondary workers - Bounds: [0, max_instances]. Default: 0.

func (InstanceGroupAutoscalingPolicyConfigResponseOutput) ToInstanceGroupAutoscalingPolicyConfigResponseOutput

func (o InstanceGroupAutoscalingPolicyConfigResponseOutput) ToInstanceGroupAutoscalingPolicyConfigResponseOutput() InstanceGroupAutoscalingPolicyConfigResponseOutput

func (InstanceGroupAutoscalingPolicyConfigResponseOutput) ToInstanceGroupAutoscalingPolicyConfigResponseOutputWithContext

func (o InstanceGroupAutoscalingPolicyConfigResponseOutput) ToInstanceGroupAutoscalingPolicyConfigResponseOutputWithContext(ctx context.Context) InstanceGroupAutoscalingPolicyConfigResponseOutput

func (InstanceGroupAutoscalingPolicyConfigResponseOutput) Weight

Optional. Weight for the instance group, which is used to determine the fraction of total workers in the cluster from this instance group. For example, if primary workers have weight 2, and secondary workers have weight 1, the cluster will have approximately 2 primary workers for each secondary worker. The cluster may not reach the specified balance if constrained by min/max bounds or other autoscaling settings. For example, if max_instances for secondary workers is 0, then only primary workers will be added. The cluster can also be out of balance when created. If weight is not set on any instance group, the cluster will default to equal weight for all groups: the cluster will attempt to maintain an equal number of workers in each group within the configured size bounds for each group. If weight is set for one group only, the cluster will default to zero weight on the unset group. For example, if weight is set only on primary workers, the cluster will use primary workers only and no secondary workers.

type InstanceGroupConfig

type InstanceGroupConfig struct {
	// Optional. The Compute Engine accelerator configuration for these instances.
	Accelerators []AcceleratorConfig `pulumi:"accelerators"`
	// Optional. Disk option config settings.
	DiskConfig *DiskConfig `pulumi:"diskConfig"`
	// Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/[image-id], projects/[project_id]/global/images/[image-id], image-id. Image family examples (Dataproc will use the most recent image from the family): https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/family/[custom-image-family-name], projects/[project_id]/global/images/family/[custom-image-family-name]. If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default.
	ImageUri *string `pulumi:"imageUri"`
	// Optional. Instance flexibility Policy allowing a mixture of VM shapes and provisioning models.
	InstanceFlexibilityPolicy *InstanceFlexibilityPolicy `pulumi:"instanceFlexibilityPolicy"`
	// Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name is valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2, projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2, n1-standard-2. Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, n1-standard-2.
	MachineTypeUri *string `pulumi:"machineTypeUri"`
	// Optional. Specifies the minimum cpu platform for the Instance Group. See Dataproc -> Minimum CPU Platform (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).
	MinCpuPlatform *string `pulumi:"minCpuPlatform"`
	// Optional. The minimum number of primary worker instances to create. If min_num_instances is set, cluster creation will succeed if the number of primary workers created is at least equal to the min_num_instances number. Example: Cluster creation request with num_instances = 5 and min_num_instances = 3: If 4 VMs are created and 1 instance fails, the failed VM is deleted. The cluster is resized to 4 instances and placed in a RUNNING state. If 2 instances are created and 3 instances fail, the cluster is placed in an ERROR state. The failed VMs are not deleted.
	MinNumInstances *int `pulumi:"minNumInstances"`
	// Optional. The number of VM instances in the instance group. For HA cluster master_config groups, must be set to 3. For standard cluster master_config groups, must be set to 1.
	NumInstances *int `pulumi:"numInstances"`
	// Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed. The default value for secondary instances is PREEMPTIBLE.
	Preemptibility *InstanceGroupConfigPreemptibility `pulumi:"preemptibility"`
	// Optional. Configuration to handle the startup of instances during cluster create and update process.
	StartupConfig *StartupConfig `pulumi:"startupConfig"`
}

The config settings for Compute Engine resources in an instance group, such as a master or worker group.

type InstanceGroupConfigArgs

type InstanceGroupConfigArgs struct {
	// Optional. The Compute Engine accelerator configuration for these instances.
	Accelerators AcceleratorConfigArrayInput `pulumi:"accelerators"`
	// Optional. Disk option config settings.
	DiskConfig DiskConfigPtrInput `pulumi:"diskConfig"`
	// Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/[image-id], projects/[project_id]/global/images/[image-id], image-id. Image family examples (Dataproc will use the most recent image from the family): https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/family/[custom-image-family-name], projects/[project_id]/global/images/family/[custom-image-family-name]. If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default.
	ImageUri pulumi.StringPtrInput `pulumi:"imageUri"`
	// Optional. Instance flexibility Policy allowing a mixture of VM shapes and provisioning models.
	InstanceFlexibilityPolicy InstanceFlexibilityPolicyPtrInput `pulumi:"instanceFlexibilityPolicy"`
	// Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name is valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2, projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2, n1-standard-2. Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, n1-standard-2.
	MachineTypeUri pulumi.StringPtrInput `pulumi:"machineTypeUri"`
	// Optional. Specifies the minimum cpu platform for the Instance Group. See Dataproc -> Minimum CPU Platform (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).
	MinCpuPlatform pulumi.StringPtrInput `pulumi:"minCpuPlatform"`
	// Optional. The minimum number of primary worker instances to create. If min_num_instances is set, cluster creation will succeed if the number of primary workers created is at least equal to the min_num_instances number. Example: Cluster creation request with num_instances = 5 and min_num_instances = 3: If 4 VMs are created and 1 instance fails, the failed VM is deleted. The cluster is resized to 4 instances and placed in a RUNNING state. If 2 instances are created and 3 instances fail, the cluster is placed in an ERROR state. The failed VMs are not deleted.
	MinNumInstances pulumi.IntPtrInput `pulumi:"minNumInstances"`
	// Optional. The number of VM instances in the instance group. For HA cluster master_config groups, must be set to 3. For standard cluster master_config groups, must be set to 1.
	NumInstances pulumi.IntPtrInput `pulumi:"numInstances"`
	// Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed. The default value for secondary instances is PREEMPTIBLE.
	Preemptibility InstanceGroupConfigPreemptibilityPtrInput `pulumi:"preemptibility"`
	// Optional. Configuration to handle the startup of instances during cluster create and update process.
	StartupConfig StartupConfigPtrInput `pulumi:"startupConfig"`
}

The config settings for Compute Engine resources in an instance group, such as a master or worker group.
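
A minimal construction fragment (not part of the generated reference above): building an InstanceGroupConfigArgs for a two-node worker group. The pulumi-google-native import path and the idea of assigning this value to a cluster's worker config are assumptions, and only fields documented on this page are set.

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// workerConfig sketches a primary worker group: two VMs, creation tolerated
// with at least one, a short machine-type name, and the only preemptibility
// allowed for primary workers (NON_PREEMPTIBLE).
func workerConfig() dataproc.InstanceGroupConfigArgs {
	return dataproc.InstanceGroupConfigArgs{
		NumInstances:    pulumi.Int(2),
		MinNumInstances: pulumi.Int(1),
		MachineTypeUri:  pulumi.String("n1-standard-2"),
		Preemptibility:  dataproc.InstanceGroupConfigPreemptibilityNonPreemptible,
	}
}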

func (InstanceGroupConfigArgs) ElementType

func (InstanceGroupConfigArgs) ElementType() reflect.Type

func (InstanceGroupConfigArgs) ToInstanceGroupConfigOutput

func (i InstanceGroupConfigArgs) ToInstanceGroupConfigOutput() InstanceGroupConfigOutput

func (InstanceGroupConfigArgs) ToInstanceGroupConfigOutputWithContext

func (i InstanceGroupConfigArgs) ToInstanceGroupConfigOutputWithContext(ctx context.Context) InstanceGroupConfigOutput

func (InstanceGroupConfigArgs) ToInstanceGroupConfigPtrOutput

func (i InstanceGroupConfigArgs) ToInstanceGroupConfigPtrOutput() InstanceGroupConfigPtrOutput

func (InstanceGroupConfigArgs) ToInstanceGroupConfigPtrOutputWithContext

func (i InstanceGroupConfigArgs) ToInstanceGroupConfigPtrOutputWithContext(ctx context.Context) InstanceGroupConfigPtrOutput

type InstanceGroupConfigInput

type InstanceGroupConfigInput interface {
	pulumi.Input

	ToInstanceGroupConfigOutput() InstanceGroupConfigOutput
	ToInstanceGroupConfigOutputWithContext(context.Context) InstanceGroupConfigOutput
}

InstanceGroupConfigInput is an input type that accepts InstanceGroupConfigArgs and InstanceGroupConfigOutput values. You can construct a concrete instance of `InstanceGroupConfigInput` via:

InstanceGroupConfigArgs{...}

type InstanceGroupConfigOutput

type InstanceGroupConfigOutput struct{ *pulumi.OutputState }

The config settings for Compute Engine resources in an instance group, such as a master or worker group.

func (InstanceGroupConfigOutput) Accelerators

Optional. The Compute Engine accelerator configuration for these instances.

func (InstanceGroupConfigOutput) DiskConfig

Optional. Disk option config settings.

func (InstanceGroupConfigOutput) ElementType

func (InstanceGroupConfigOutput) ElementType() reflect.Type

func (InstanceGroupConfigOutput) ImageUri

Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/[image-id], projects/[project_id]/global/images/[image-id], image-id. Image family examples (Dataproc will use the most recent image from the family): https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/family/[custom-image-family-name], projects/[project_id]/global/images/family/[custom-image-family-name]. If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default.

func (InstanceGroupConfigOutput) InstanceFlexibilityPolicy added in v0.32.0

func (o InstanceGroupConfigOutput) InstanceFlexibilityPolicy() InstanceFlexibilityPolicyPtrOutput

Optional. Instance flexibility Policy allowing a mixture of VM shapes and provisioning models.

func (InstanceGroupConfigOutput) MachineTypeUri

Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name is valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2, projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2, n1-standard-2. Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, n1-standard-2.

func (InstanceGroupConfigOutput) MinCpuPlatform

Optional. Specifies the minimum cpu platform for the Instance Group. See Dataproc -> Minimum CPU Platform (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).

func (InstanceGroupConfigOutput) MinNumInstances added in v0.32.0

func (o InstanceGroupConfigOutput) MinNumInstances() pulumi.IntPtrOutput

Optional. The minimum number of primary worker instances to create. If min_num_instances is set, cluster creation will succeed if the number of primary workers created is at least equal to the min_num_instances number. Example: Cluster creation request with num_instances = 5 and min_num_instances = 3: If 4 VMs are created and 1 instance fails, the failed VM is deleted. The cluster is resized to 4 instances and placed in a RUNNING state. If 2 instances are created and 3 instances fail, the cluster is placed in an ERROR state. The failed VMs are not deleted.

func (InstanceGroupConfigOutput) NumInstances

Optional. The number of VM instances in the instance group. For HA cluster master_config groups, must be set to 3. For standard cluster master_config groups, must be set to 1.

func (InstanceGroupConfigOutput) Preemptibility

Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed. The default value for secondary instances is PREEMPTIBLE.

func (InstanceGroupConfigOutput) StartupConfig added in v0.32.0

Optional. Configuration to handle the startup of instances during cluster create and update process.

func (InstanceGroupConfigOutput) ToInstanceGroupConfigOutput

func (o InstanceGroupConfigOutput) ToInstanceGroupConfigOutput() InstanceGroupConfigOutput

func (InstanceGroupConfigOutput) ToInstanceGroupConfigOutputWithContext

func (o InstanceGroupConfigOutput) ToInstanceGroupConfigOutputWithContext(ctx context.Context) InstanceGroupConfigOutput

func (InstanceGroupConfigOutput) ToInstanceGroupConfigPtrOutput

func (o InstanceGroupConfigOutput) ToInstanceGroupConfigPtrOutput() InstanceGroupConfigPtrOutput

func (InstanceGroupConfigOutput) ToInstanceGroupConfigPtrOutputWithContext

func (o InstanceGroupConfigOutput) ToInstanceGroupConfigPtrOutputWithContext(ctx context.Context) InstanceGroupConfigPtrOutput

type InstanceGroupConfigPreemptibility added in v0.4.0

type InstanceGroupConfigPreemptibility string

Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed. The default value for secondary instances is PREEMPTIBLE.
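
A brief fragment, under the assumption that it sits inside a cluster definition for a secondary worker group, showing how an enum value is passed (the instance count is a placeholder):

// Sketch: a secondary worker group on Spot VMs. The typed constant satisfies
// InstanceGroupConfigPreemptibilityPtrInput directly;
// InstanceGroupConfigPreemptibilityPtr("SPOT") is an equivalent spelling.
secondaryWorkers := dataproc.InstanceGroupConfigArgs{
	NumInstances:   pulumi.Int(4),
	Preemptibility: dataproc.InstanceGroupConfigPreemptibilitySpot,
}
_ = secondaryWorkers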

func (InstanceGroupConfigPreemptibility) ElementType added in v0.4.0

func (InstanceGroupConfigPreemptibility) ToInstanceGroupConfigPreemptibilityOutput added in v0.6.0

func (e InstanceGroupConfigPreemptibility) ToInstanceGroupConfigPreemptibilityOutput() InstanceGroupConfigPreemptibilityOutput

func (InstanceGroupConfigPreemptibility) ToInstanceGroupConfigPreemptibilityOutputWithContext added in v0.6.0

func (e InstanceGroupConfigPreemptibility) ToInstanceGroupConfigPreemptibilityOutputWithContext(ctx context.Context) InstanceGroupConfigPreemptibilityOutput

func (InstanceGroupConfigPreemptibility) ToInstanceGroupConfigPreemptibilityPtrOutput added in v0.6.0

func (e InstanceGroupConfigPreemptibility) ToInstanceGroupConfigPreemptibilityPtrOutput() InstanceGroupConfigPreemptibilityPtrOutput

func (InstanceGroupConfigPreemptibility) ToInstanceGroupConfigPreemptibilityPtrOutputWithContext added in v0.6.0

func (e InstanceGroupConfigPreemptibility) ToInstanceGroupConfigPreemptibilityPtrOutputWithContext(ctx context.Context) InstanceGroupConfigPreemptibilityPtrOutput

func (InstanceGroupConfigPreemptibility) ToStringOutput added in v0.4.0

func (InstanceGroupConfigPreemptibility) ToStringOutputWithContext added in v0.4.0

func (e InstanceGroupConfigPreemptibility) ToStringOutputWithContext(ctx context.Context) pulumi.StringOutput

func (InstanceGroupConfigPreemptibility) ToStringPtrOutput added in v0.4.0

func (InstanceGroupConfigPreemptibility) ToStringPtrOutputWithContext added in v0.4.0

func (e InstanceGroupConfigPreemptibility) ToStringPtrOutputWithContext(ctx context.Context) pulumi.StringPtrOutput

type InstanceGroupConfigPreemptibilityInput added in v0.6.0

type InstanceGroupConfigPreemptibilityInput interface {
	pulumi.Input

	ToInstanceGroupConfigPreemptibilityOutput() InstanceGroupConfigPreemptibilityOutput
	ToInstanceGroupConfigPreemptibilityOutputWithContext(context.Context) InstanceGroupConfigPreemptibilityOutput
}

InstanceGroupConfigPreemptibilityInput is an input type that accepts InstanceGroupConfigPreemptibilityArgs and InstanceGroupConfigPreemptibilityOutput values. You can construct a concrete instance of `InstanceGroupConfigPreemptibilityInput` via:

InstanceGroupConfigPreemptibilityArgs{...}

type InstanceGroupConfigPreemptibilityOutput added in v0.6.0

type InstanceGroupConfigPreemptibilityOutput struct{ *pulumi.OutputState }

func (InstanceGroupConfigPreemptibilityOutput) ElementType added in v0.6.0

func (InstanceGroupConfigPreemptibilityOutput) ToInstanceGroupConfigPreemptibilityOutput added in v0.6.0

func (o InstanceGroupConfigPreemptibilityOutput) ToInstanceGroupConfigPreemptibilityOutput() InstanceGroupConfigPreemptibilityOutput

func (InstanceGroupConfigPreemptibilityOutput) ToInstanceGroupConfigPreemptibilityOutputWithContext added in v0.6.0

func (o InstanceGroupConfigPreemptibilityOutput) ToInstanceGroupConfigPreemptibilityOutputWithContext(ctx context.Context) InstanceGroupConfigPreemptibilityOutput

func (InstanceGroupConfigPreemptibilityOutput) ToInstanceGroupConfigPreemptibilityPtrOutput added in v0.6.0

func (o InstanceGroupConfigPreemptibilityOutput) ToInstanceGroupConfigPreemptibilityPtrOutput() InstanceGroupConfigPreemptibilityPtrOutput

func (InstanceGroupConfigPreemptibilityOutput) ToInstanceGroupConfigPreemptibilityPtrOutputWithContext added in v0.6.0

func (o InstanceGroupConfigPreemptibilityOutput) ToInstanceGroupConfigPreemptibilityPtrOutputWithContext(ctx context.Context) InstanceGroupConfigPreemptibilityPtrOutput

func (InstanceGroupConfigPreemptibilityOutput) ToStringOutput added in v0.6.0

func (InstanceGroupConfigPreemptibilityOutput) ToStringOutputWithContext added in v0.6.0

func (InstanceGroupConfigPreemptibilityOutput) ToStringPtrOutput added in v0.6.0

func (InstanceGroupConfigPreemptibilityOutput) ToStringPtrOutputWithContext added in v0.6.0

type InstanceGroupConfigPreemptibilityPtrInput added in v0.6.0

type InstanceGroupConfigPreemptibilityPtrInput interface {
	pulumi.Input

	ToInstanceGroupConfigPreemptibilityPtrOutput() InstanceGroupConfigPreemptibilityPtrOutput
	ToInstanceGroupConfigPreemptibilityPtrOutputWithContext(context.Context) InstanceGroupConfigPreemptibilityPtrOutput
}

func InstanceGroupConfigPreemptibilityPtr added in v0.6.0

func InstanceGroupConfigPreemptibilityPtr(v string) InstanceGroupConfigPreemptibilityPtrInput

type InstanceGroupConfigPreemptibilityPtrOutput added in v0.6.0

type InstanceGroupConfigPreemptibilityPtrOutput struct{ *pulumi.OutputState }

func (InstanceGroupConfigPreemptibilityPtrOutput) Elem added in v0.6.0

func (InstanceGroupConfigPreemptibilityPtrOutput) ElementType added in v0.6.0

func (InstanceGroupConfigPreemptibilityPtrOutput) ToInstanceGroupConfigPreemptibilityPtrOutput added in v0.6.0

func (o InstanceGroupConfigPreemptibilityPtrOutput) ToInstanceGroupConfigPreemptibilityPtrOutput() InstanceGroupConfigPreemptibilityPtrOutput

func (InstanceGroupConfigPreemptibilityPtrOutput) ToInstanceGroupConfigPreemptibilityPtrOutputWithContext added in v0.6.0

func (o InstanceGroupConfigPreemptibilityPtrOutput) ToInstanceGroupConfigPreemptibilityPtrOutputWithContext(ctx context.Context) InstanceGroupConfigPreemptibilityPtrOutput

func (InstanceGroupConfigPreemptibilityPtrOutput) ToStringPtrOutput added in v0.6.0

func (InstanceGroupConfigPreemptibilityPtrOutput) ToStringPtrOutputWithContext added in v0.6.0

type InstanceGroupConfigPtrInput

type InstanceGroupConfigPtrInput interface {
	pulumi.Input

	ToInstanceGroupConfigPtrOutput() InstanceGroupConfigPtrOutput
	ToInstanceGroupConfigPtrOutputWithContext(context.Context) InstanceGroupConfigPtrOutput
}

InstanceGroupConfigPtrInput is an input type that accepts InstanceGroupConfigArgs, InstanceGroupConfigPtr and InstanceGroupConfigPtrOutput values. You can construct a concrete instance of `InstanceGroupConfigPtrInput` via:

        InstanceGroupConfigArgs{...}

or:

        nil

type InstanceGroupConfigPtrOutput

type InstanceGroupConfigPtrOutput struct{ *pulumi.OutputState }

func (InstanceGroupConfigPtrOutput) Accelerators

Optional. The Compute Engine accelerator configuration for these instances.

func (InstanceGroupConfigPtrOutput) DiskConfig

Optional. Disk option config settings.

func (InstanceGroupConfigPtrOutput) Elem

func (InstanceGroupConfigPtrOutput) ElementType

func (InstanceGroupConfigPtrOutput) ImageUri

Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/[image-id], projects/[project_id]/global/images/[image-id], image-id. Image family examples (Dataproc will use the most recent image from the family): https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/family/[custom-image-family-name], projects/[project_id]/global/images/family/[custom-image-family-name]. If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default.

func (InstanceGroupConfigPtrOutput) InstanceFlexibilityPolicy added in v0.32.0

Optional. Instance flexibility Policy allowing a mixture of VM shapes and provisioning models.

func (InstanceGroupConfigPtrOutput) MachineTypeUri

Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name is valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2, projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2, n1-standard-2. Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, n1-standard-2.

func (InstanceGroupConfigPtrOutput) MinCpuPlatform

Optional. Specifies the minimum cpu platform for the Instance Group. See Dataproc -> Minimum CPU Platform (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).

func (InstanceGroupConfigPtrOutput) MinNumInstances added in v0.32.0

func (o InstanceGroupConfigPtrOutput) MinNumInstances() pulumi.IntPtrOutput

Optional. The minimum number of primary worker instances to create. If min_num_instances is set, cluster creation will succeed if the number of primary workers created is at least equal to the min_num_instances number. Example: Cluster creation request with num_instances = 5 and min_num_instances = 3: If 4 VMs are created and 1 instance fails, the failed VM is deleted. The cluster is resized to 4 instances and placed in a RUNNING state. If 2 instances are created and 3 instances fail, the cluster is placed in an ERROR state. The failed VMs are not deleted.

func (InstanceGroupConfigPtrOutput) NumInstances

Optional. The number of VM instances in the instance group. For HA cluster master_config groups, must be set to 3. For standard cluster master_config groups, must be set to 1.

func (InstanceGroupConfigPtrOutput) Preemptibility

Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed. The default value for secondary instances is PREEMPTIBLE.

func (InstanceGroupConfigPtrOutput) StartupConfig added in v0.32.0

Optional. Configuration to handle the startup of instances during cluster create and update process.

func (InstanceGroupConfigPtrOutput) ToInstanceGroupConfigPtrOutput

func (o InstanceGroupConfigPtrOutput) ToInstanceGroupConfigPtrOutput() InstanceGroupConfigPtrOutput

func (InstanceGroupConfigPtrOutput) ToInstanceGroupConfigPtrOutputWithContext

func (o InstanceGroupConfigPtrOutput) ToInstanceGroupConfigPtrOutputWithContext(ctx context.Context) InstanceGroupConfigPtrOutput

type InstanceGroupConfigResponse

type InstanceGroupConfigResponse struct {
	// Optional. The Compute Engine accelerator configuration for these instances.
	Accelerators []AcceleratorConfigResponse `pulumi:"accelerators"`
	// Optional. Disk option config settings.
	DiskConfig DiskConfigResponse `pulumi:"diskConfig"`
	// Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/[image-id], projects/[project_id]/global/images/[image-id], image-id. Image family examples (Dataproc will use the most recent image from the family): https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/family/[custom-image-family-name], projects/[project_id]/global/images/family/[custom-image-family-name]. If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default.
	ImageUri string `pulumi:"imageUri"`
	// Optional. Instance flexibility Policy allowing a mixture of VM shapes and provisioning models.
	InstanceFlexibilityPolicy InstanceFlexibilityPolicyResponse `pulumi:"instanceFlexibilityPolicy"`
	// The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group.
	InstanceNames []string `pulumi:"instanceNames"`
	// List of references to Compute Engine instances.
	InstanceReferences []InstanceReferenceResponse `pulumi:"instanceReferences"`
	// Specifies that this instance group contains preemptible instances.
	IsPreemptible bool `pulumi:"isPreemptible"`
	// Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name is valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2, projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2, n1-standard-2. Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, n1-standard-2.
	MachineTypeUri string `pulumi:"machineTypeUri"`
	// The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.
	ManagedGroupConfig ManagedGroupConfigResponse `pulumi:"managedGroupConfig"`
	// Optional. Specifies the minimum cpu platform for the Instance Group. See Dataproc -> Minimum CPU Platform (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).
	MinCpuPlatform string `pulumi:"minCpuPlatform"`
	// Optional. The minimum number of primary worker instances to create. If min_num_instances is set, cluster creation will succeed if the number of primary workers created is at least equal to the min_num_instances number. Example: Cluster creation request with num_instances = 5 and min_num_instances = 3: If 4 VMs are created and 1 instance fails, the failed VM is deleted. The cluster is resized to 4 instances and placed in a RUNNING state. If 2 instances are created and 3 instances fail, the cluster is placed in an ERROR state. The failed VMs are not deleted.
	MinNumInstances int `pulumi:"minNumInstances"`
	// Optional. The number of VM instances in the instance group. For HA cluster master_config groups, must be set to 3. For standard cluster master_config groups, must be set to 1.
	NumInstances int `pulumi:"numInstances"`
	// Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed. The default value for secondary instances is PREEMPTIBLE.
	Preemptibility string `pulumi:"preemptibility"`
	// Optional. Configuration to handle the startup of instances during cluster create and update process.
	StartupConfig StartupConfigResponse `pulumi:"startupConfig"`
}

The config settings for Compute Engine resources in an instance group, such as a master or worker group.

type InstanceGroupConfigResponseOutput

type InstanceGroupConfigResponseOutput struct{ *pulumi.OutputState }

The config settings for Compute Engine resources in an instance group, such as a master or worker group.

func (InstanceGroupConfigResponseOutput) Accelerators

Optional. The Compute Engine accelerator configuration for these instances.

func (InstanceGroupConfigResponseOutput) DiskConfig

Optional. Disk option config settings.

func (InstanceGroupConfigResponseOutput) ElementType

func (InstanceGroupConfigResponseOutput) ImageUri

Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/[image-id], projects/[project_id]/global/images/[image-id], image-id. Image family examples (Dataproc will use the most recent image from the family): https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/family/[custom-image-family-name], projects/[project_id]/global/images/family/[custom-image-family-name]. If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default.

func (InstanceGroupConfigResponseOutput) InstanceFlexibilityPolicy added in v0.32.0

Optional. Instance flexibility Policy allowing a mixture of VM shapes and provisioning models.

func (InstanceGroupConfigResponseOutput) InstanceNames

The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group.

func (InstanceGroupConfigResponseOutput) InstanceReferences

List of references to Compute Engine instances.

func (InstanceGroupConfigResponseOutput) IsPreemptible

Specifies that this instance group contains preemptible instances.

func (InstanceGroupConfigResponseOutput) MachineTypeUri

Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name is valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2, projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2, n1-standard-2. Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, n1-standard-2.

func (InstanceGroupConfigResponseOutput) ManagedGroupConfig

The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.

func (InstanceGroupConfigResponseOutput) MinCpuPlatform

Optional. Specifies the minimum cpu platform for the Instance Group. See Dataproc -> Minimum CPU Platform (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).

func (InstanceGroupConfigResponseOutput) MinNumInstances added in v0.32.0

Optional. The minimum number of primary worker instances to create. If min_num_instances is set, cluster creation will succeed if the number of primary workers created is at least equal to the min_num_instances number. Example: Cluster creation request with num_instances = 5 and min_num_instances = 3: If 4 VMs are created and 1 instance fails, the failed VM is deleted. The cluster is resized to 4 instances and placed in a RUNNING state. If 2 instances are created and 3 instances fail, the cluster is placed in an ERROR state. The failed VMs are not deleted.

func (InstanceGroupConfigResponseOutput) NumInstances

Optional. The number of VM instances in the instance group. For HA cluster master_config groups, must be set to 3. For standard cluster master_config groups, must be set to 1.

func (InstanceGroupConfigResponseOutput) Preemptibility

Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed. The default value for secondary instances is PREEMPTIBLE.

func (InstanceGroupConfigResponseOutput) StartupConfig added in v0.32.0

Optional. Configuration to handle the startup of instances during cluster create and update process.

func (InstanceGroupConfigResponseOutput) ToInstanceGroupConfigResponseOutput

func (o InstanceGroupConfigResponseOutput) ToInstanceGroupConfigResponseOutput() InstanceGroupConfigResponseOutput

func (InstanceGroupConfigResponseOutput) ToInstanceGroupConfigResponseOutputWithContext

func (o InstanceGroupConfigResponseOutput) ToInstanceGroupConfigResponseOutputWithContext(ctx context.Context) InstanceGroupConfigResponseOutput

type InstanceReferenceResponse

type InstanceReferenceResponse struct {
	// The unique identifier of the Compute Engine instance.
	InstanceId string `pulumi:"instanceId"`
	// The user-friendly name of the Compute Engine instance.
	InstanceName string `pulumi:"instanceName"`
	// The public ECIES key used for sharing data with this instance.
	PublicEciesKey string `pulumi:"publicEciesKey"`
	// The public RSA key used for sharing data with this instance.
	PublicKey string `pulumi:"publicKey"`
}

A reference to a Compute Engine instance.

type InstanceReferenceResponseArrayOutput

type InstanceReferenceResponseArrayOutput struct{ *pulumi.OutputState }

func (InstanceReferenceResponseArrayOutput) ElementType

func (InstanceReferenceResponseArrayOutput) Index

func (InstanceReferenceResponseArrayOutput) ToInstanceReferenceResponseArrayOutput

func (o InstanceReferenceResponseArrayOutput) ToInstanceReferenceResponseArrayOutput() InstanceReferenceResponseArrayOutput

func (InstanceReferenceResponseArrayOutput) ToInstanceReferenceResponseArrayOutputWithContext

func (o InstanceReferenceResponseArrayOutput) ToInstanceReferenceResponseArrayOutputWithContext(ctx context.Context) InstanceReferenceResponseArrayOutput

type InstanceReferenceResponseOutput

type InstanceReferenceResponseOutput struct{ *pulumi.OutputState }

A reference to a Compute Engine instance.

func (InstanceReferenceResponseOutput) ElementType

func (InstanceReferenceResponseOutput) InstanceId

The unique identifier of the Compute Engine instance.

func (InstanceReferenceResponseOutput) InstanceName

The user-friendly name of the Compute Engine instance.

func (InstanceReferenceResponseOutput) PublicEciesKey added in v0.5.0

The public ECIES key used for sharing data with this instance.

func (InstanceReferenceResponseOutput) PublicKey

The public RSA key used for sharing data with this instance.

func (InstanceReferenceResponseOutput) ToInstanceReferenceResponseOutput

func (o InstanceReferenceResponseOutput) ToInstanceReferenceResponseOutput() InstanceReferenceResponseOutput

func (InstanceReferenceResponseOutput) ToInstanceReferenceResponseOutputWithContext

func (o InstanceReferenceResponseOutput) ToInstanceReferenceResponseOutputWithContext(ctx context.Context) InstanceReferenceResponseOutput

type InstanceSelection added in v0.32.0

type InstanceSelection struct {
	// Optional. Full machine-type names, e.g. "n1-standard-16".
	MachineTypes []string `pulumi:"machineTypes"`
	// Optional. Preference of this instance selection. A lower number means a higher preference. Dataproc will first try to create a VM from the machine types with the highest-priority rank and fall back to the next rank based on availability. Machine types and instance selections with the same priority have the same preference.
	Rank *int `pulumi:"rank"`
}

Defines machine types and a rank to which the machine types belong.

type InstanceSelectionArgs added in v0.32.0

type InstanceSelectionArgs struct {
	// Optional. Full machine-type names, e.g. "n1-standard-16".
	MachineTypes pulumi.StringArrayInput `pulumi:"machineTypes"`
	// Optional. Preference of this instance selection. A lower number means a higher preference. Dataproc will first try to create a VM from the machine types with the highest-priority rank and fall back to the next rank based on availability. Machine types and instance selections with the same priority have the same preference.
	Rank pulumi.IntPtrInput `pulumi:"rank"`
}

Defines machine types and a rank to which the machine types belong.
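
A short fragment illustrating ranked instance selections; the machine-type names are placeholders, and the InstanceFlexibilityPolicyArgs field that would receive this array is not documented on this page, so only the array itself is built:

// Sketch: rank 0 is tried first; Dataproc falls back to the rank 1 machine
// types when rank 0 capacity is unavailable.
selections := dataproc.InstanceSelectionArray{
	dataproc.InstanceSelectionArgs{
		MachineTypes: pulumi.StringArray{
			pulumi.String("n2-standard-8"),
			pulumi.String("n2d-standard-8"),
		},
		Rank: pulumi.Int(0),
	},
	dataproc.InstanceSelectionArgs{
		MachineTypes: pulumi.StringArray{pulumi.String("n1-standard-8")},
		Rank:         pulumi.Int(1),
	},
}
_ = selections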

func (InstanceSelectionArgs) ElementType added in v0.32.0

func (InstanceSelectionArgs) ElementType() reflect.Type

func (InstanceSelectionArgs) ToInstanceSelectionOutput added in v0.32.0

func (i InstanceSelectionArgs) ToInstanceSelectionOutput() InstanceSelectionOutput

func (InstanceSelectionArgs) ToInstanceSelectionOutputWithContext added in v0.32.0

func (i InstanceSelectionArgs) ToInstanceSelectionOutputWithContext(ctx context.Context) InstanceSelectionOutput

type InstanceSelectionArray added in v0.32.0

type InstanceSelectionArray []InstanceSelectionInput

func (InstanceSelectionArray) ElementType added in v0.32.0

func (InstanceSelectionArray) ElementType() reflect.Type

func (InstanceSelectionArray) ToInstanceSelectionArrayOutput added in v0.32.0

func (i InstanceSelectionArray) ToInstanceSelectionArrayOutput() InstanceSelectionArrayOutput

func (InstanceSelectionArray) ToInstanceSelectionArrayOutputWithContext added in v0.32.0

func (i InstanceSelectionArray) ToInstanceSelectionArrayOutputWithContext(ctx context.Context) InstanceSelectionArrayOutput

type InstanceSelectionArrayInput added in v0.32.0

type InstanceSelectionArrayInput interface {
	pulumi.Input

	ToInstanceSelectionArrayOutput() InstanceSelectionArrayOutput
	ToInstanceSelectionArrayOutputWithContext(context.Context) InstanceSelectionArrayOutput
}

InstanceSelectionArrayInput is an input type that accepts InstanceSelectionArray and InstanceSelectionArrayOutput values. You can construct a concrete instance of `InstanceSelectionArrayInput` via:

InstanceSelectionArray{ InstanceSelectionArgs{...} }

type InstanceSelectionArrayOutput added in v0.32.0

type InstanceSelectionArrayOutput struct{ *pulumi.OutputState }

func (InstanceSelectionArrayOutput) ElementType added in v0.32.0

func (InstanceSelectionArrayOutput) Index added in v0.32.0

func (InstanceSelectionArrayOutput) ToInstanceSelectionArrayOutput added in v0.32.0

func (o InstanceSelectionArrayOutput) ToInstanceSelectionArrayOutput() InstanceSelectionArrayOutput

func (InstanceSelectionArrayOutput) ToInstanceSelectionArrayOutputWithContext added in v0.32.0

func (o InstanceSelectionArrayOutput) ToInstanceSelectionArrayOutputWithContext(ctx context.Context) InstanceSelectionArrayOutput

type InstanceSelectionInput added in v0.32.0

type InstanceSelectionInput interface {
	pulumi.Input

	ToInstanceSelectionOutput() InstanceSelectionOutput
	ToInstanceSelectionOutputWithContext(context.Context) InstanceSelectionOutput
}

InstanceSelectionInput is an input type that accepts InstanceSelectionArgs and InstanceSelectionOutput values. You can construct a concrete instance of `InstanceSelectionInput` via:

InstanceSelectionArgs{...}

type InstanceSelectionOutput added in v0.32.0

type InstanceSelectionOutput struct{ *pulumi.OutputState }

Defines machine types and a rank to which the machine types belong.

func (InstanceSelectionOutput) ElementType added in v0.32.0

func (InstanceSelectionOutput) ElementType() reflect.Type

func (InstanceSelectionOutput) MachineTypes added in v0.32.0

Optional. Full machine-type names, e.g. "n1-standard-16".

func (InstanceSelectionOutput) Rank added in v0.32.0

Optional. Preference of this instance selection. A lower number means a higher preference. Dataproc will first try to create a VM from the machine types with the highest-priority rank and fall back to the next rank based on availability. Machine types and instance selections with the same priority have the same preference.

func (InstanceSelectionOutput) ToInstanceSelectionOutput added in v0.32.0

func (o InstanceSelectionOutput) ToInstanceSelectionOutput() InstanceSelectionOutput

func (InstanceSelectionOutput) ToInstanceSelectionOutputWithContext added in v0.32.0

func (o InstanceSelectionOutput) ToInstanceSelectionOutputWithContext(ctx context.Context) InstanceSelectionOutput

type InstanceSelectionResponse added in v0.32.0

type InstanceSelectionResponse struct {
	// Optional. Full machine-type names, e.g. "n1-standard-16".
	MachineTypes []string `pulumi:"machineTypes"`
	// Optional. Preference of this instance selection. A lower number means a higher preference. Dataproc will first try to create a VM from the machine types with the highest-priority rank and fall back to the next rank based on availability. Machine types and instance selections with the same priority have the same preference.
	Rank int `pulumi:"rank"`
}

Defines machine types and a rank to which the machine types belong.

type InstanceSelectionResponseArrayOutput added in v0.32.0

type InstanceSelectionResponseArrayOutput struct{ *pulumi.OutputState }

func (InstanceSelectionResponseArrayOutput) ElementType added in v0.32.0

func (InstanceSelectionResponseArrayOutput) Index added in v0.32.0

func (InstanceSelectionResponseArrayOutput) ToInstanceSelectionResponseArrayOutput added in v0.32.0

func (o InstanceSelectionResponseArrayOutput) ToInstanceSelectionResponseArrayOutput() InstanceSelectionResponseArrayOutput

func (InstanceSelectionResponseArrayOutput) ToInstanceSelectionResponseArrayOutputWithContext added in v0.32.0

func (o InstanceSelectionResponseArrayOutput) ToInstanceSelectionResponseArrayOutputWithContext(ctx context.Context) InstanceSelectionResponseArrayOutput

type InstanceSelectionResponseOutput added in v0.32.0

type InstanceSelectionResponseOutput struct{ *pulumi.OutputState }

Defines machine types and a rank to which the machine types belong.

func (InstanceSelectionResponseOutput) ElementType added in v0.32.0

func (InstanceSelectionResponseOutput) MachineTypes added in v0.32.0

Optional. Full machine-type names, e.g. "n1-standard-16".

func (InstanceSelectionResponseOutput) Rank added in v0.32.0

Optional. Preference of this instance selection. A lower number means a higher preference. Dataproc will first try to create a VM from the machine types with the highest-priority rank and fall back to the next rank based on availability. Machine types and instance selections with the same priority have the same preference.

func (InstanceSelectionResponseOutput) ToInstanceSelectionResponseOutput added in v0.32.0

func (o InstanceSelectionResponseOutput) ToInstanceSelectionResponseOutput() InstanceSelectionResponseOutput

func (InstanceSelectionResponseOutput) ToInstanceSelectionResponseOutputWithContext added in v0.32.0

func (o InstanceSelectionResponseOutput) ToInstanceSelectionResponseOutputWithContext(ctx context.Context) InstanceSelectionResponseOutput

type InstanceSelectionResultResponse added in v0.32.0

type InstanceSelectionResultResponse struct {
	// Full machine-type names, e.g. "n1-standard-16".
	MachineType string `pulumi:"machineType"`
	// Number of VMs provisioned with the machine_type.
	VmCount int `pulumi:"vmCount"`
}

Defines a mapping from machine types to the number of VMs that are created with each machine type.

type InstanceSelectionResultResponseArrayOutput added in v0.32.0

type InstanceSelectionResultResponseArrayOutput struct{ *pulumi.OutputState }

func (InstanceSelectionResultResponseArrayOutput) ElementType added in v0.32.0

func (InstanceSelectionResultResponseArrayOutput) Index added in v0.32.0

func (InstanceSelectionResultResponseArrayOutput) ToInstanceSelectionResultResponseArrayOutput added in v0.32.0

func (o InstanceSelectionResultResponseArrayOutput) ToInstanceSelectionResultResponseArrayOutput() InstanceSelectionResultResponseArrayOutput

func (InstanceSelectionResultResponseArrayOutput) ToInstanceSelectionResultResponseArrayOutputWithContext added in v0.32.0

func (o InstanceSelectionResultResponseArrayOutput) ToInstanceSelectionResultResponseArrayOutputWithContext(ctx context.Context) InstanceSelectionResultResponseArrayOutput

type InstanceSelectionResultResponseOutput added in v0.32.0

type InstanceSelectionResultResponseOutput struct{ *pulumi.OutputState }

Defines a mapping from machine types to the number of VMs that are created with each machine type.

func (InstanceSelectionResultResponseOutput) ElementType added in v0.32.0

func (InstanceSelectionResultResponseOutput) MachineType added in v0.32.0

Full machine-type names, e.g. "n1-standard-16".

func (InstanceSelectionResultResponseOutput) ToInstanceSelectionResultResponseOutput added in v0.32.0

func (o InstanceSelectionResultResponseOutput) ToInstanceSelectionResultResponseOutput() InstanceSelectionResultResponseOutput

func (InstanceSelectionResultResponseOutput) ToInstanceSelectionResultResponseOutputWithContext added in v0.32.0

func (o InstanceSelectionResultResponseOutput) ToInstanceSelectionResultResponseOutputWithContext(ctx context.Context) InstanceSelectionResultResponseOutput

func (InstanceSelectionResultResponseOutput) VmCount added in v0.32.0

Number of VMs provisioned with the machine_type.

type Job added in v0.3.0

type Job struct {
	pulumi.CustomResourceState

	// Indicates whether the job is completed. If the value is false, the job is still in progress. If true, the job is completed, and status.state field will indicate if it was successful, failed, or cancelled.
	Done pulumi.BoolOutput `pulumi:"done"`
	// If present, the location of miscellaneous control files which can be used as part of job setup and handling. If not present, control files might be placed in the same location as driver_output_uri.
	DriverControlFilesUri pulumi.StringOutput `pulumi:"driverControlFilesUri"`
	// A URI pointing to the location of the stdout of the job's driver program.
	DriverOutputResourceUri pulumi.StringOutput `pulumi:"driverOutputResourceUri"`
	// Optional. Driver scheduling configuration.
	DriverSchedulingConfig DriverSchedulingConfigResponseOutput `pulumi:"driverSchedulingConfig"`
	// Optional. Job is a Flink job.
	FlinkJob FlinkJobResponseOutput `pulumi:"flinkJob"`
	// Optional. Job is a Hadoop job.
	HadoopJob HadoopJobResponseOutput `pulumi:"hadoopJob"`
	// Optional. Job is a Hive job.
	HiveJob HiveJobResponseOutput `pulumi:"hiveJob"`
	// A UUID that uniquely identifies a job within the project over time. This is in contrast to a user-settable reference.job_id that might be reused over time.
	JobUuid pulumi.StringOutput `pulumi:"jobUuid"`
	// Optional. The labels to associate with this job. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a job.
	Labels pulumi.StringMapOutput `pulumi:"labels"`
	// Optional. Job is a Pig job.
	PigJob PigJobResponseOutput `pulumi:"pigJob"`
	// Job information, including how, when, and where to run the job.
	Placement JobPlacementResponseOutput `pulumi:"placement"`
	// Optional. Job is a Presto job.
	PrestoJob PrestoJobResponseOutput `pulumi:"prestoJob"`
	Project   pulumi.StringOutput     `pulumi:"project"`
	// Optional. Job is a PySpark job.
	PysparkJob PySparkJobResponseOutput `pulumi:"pysparkJob"`
	// Optional. The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a job_id.
	Reference JobReferenceResponseOutput `pulumi:"reference"`
	Region    pulumi.StringOutput        `pulumi:"region"`
	// Optional. Job scheduling configuration.
	Scheduling JobSchedulingResponseOutput `pulumi:"scheduling"`
	// Optional. Job is a Spark job.
	SparkJob SparkJobResponseOutput `pulumi:"sparkJob"`
	// Optional. Job is a SparkR job.
	SparkRJob SparkRJobResponseOutput `pulumi:"sparkRJob"`
	// Optional. Job is a SparkSql job.
	SparkSqlJob SparkSqlJobResponseOutput `pulumi:"sparkSqlJob"`
	// The job status. Additional application-specific status information might be contained in the type_job and yarn_applications fields.
	Status JobStatusResponseOutput `pulumi:"status"`
	// The previous job status.
	StatusHistory JobStatusResponseArrayOutput `pulumi:"statusHistory"`
	// Optional. Job is a Trino job.
	TrinoJob TrinoJobResponseOutput `pulumi:"trinoJob"`
	// The collection of YARN applications spun up by this job. Beta Feature: This report is available for testing purposes only. It might be changed before final release.
	YarnApplications YarnApplicationResponseArrayOutput `pulumi:"yarnApplications"`
}

Submits a job to a cluster. Auto-naming is currently not supported for this resource.

func GetJob added in v0.3.0

func GetJob(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *JobState, opts ...pulumi.ResourceOption) (*Job, error)

GetJob gets an existing Job resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewJob added in v0.3.0

func NewJob(ctx *pulumi.Context,
	name string, args *JobArgs, opts ...pulumi.ResourceOption) (*Job, error)

NewJob registers a new resource with the given unique name, arguments, and options.
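
A hedged end-to-end sketch of submitting a job with NewJob. The import paths, region, existing cluster name, and the PySparkJobArgs field name (MainPythonFileUri) are assumptions that do not appear on this page.

package main

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Sketch: submit a PySpark job to an existing cluster. The cluster
		// name, bucket path, and labels are placeholders.
		_, err := dataproc.NewJob(ctx, "example-job", &dataproc.JobArgs{
			Region: pulumi.String("us-central1"),
			Placement: dataproc.JobPlacementArgs{
				ClusterName: pulumi.String("example-cluster"),
			},
			PysparkJob: dataproc.PySparkJobArgs{
				// Assumed field name for the job's main Python file.
				MainPythonFileUri: pulumi.String("gs://example-bucket/job.py"),
			},
			Labels: pulumi.StringMap{"env": pulumi.String("dev")},
		})
		return err
	})
}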

func (*Job) ElementType added in v0.3.0

func (*Job) ElementType() reflect.Type

func (*Job) ToJobOutput added in v0.3.0

func (i *Job) ToJobOutput() JobOutput

func (*Job) ToJobOutputWithContext added in v0.3.0

func (i *Job) ToJobOutputWithContext(ctx context.Context) JobOutput

type JobArgs added in v0.3.0

type JobArgs struct {
	// Optional. Driver scheduling configuration.
	DriverSchedulingConfig DriverSchedulingConfigPtrInput
	// Optional. Job is a Flink job.
	FlinkJob FlinkJobPtrInput
	// Optional. Job is a Hadoop job.
	HadoopJob HadoopJobPtrInput
	// Optional. Job is a Hive job.
	HiveJob HiveJobPtrInput
	// Optional. The labels to associate with this job. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a job.
	Labels pulumi.StringMapInput
	// Optional. Job is a Pig job.
	PigJob PigJobPtrInput
	// Job information, including how, when, and where to run the job.
	Placement JobPlacementInput
	// Optional. Job is a Presto job.
	PrestoJob PrestoJobPtrInput
	Project   pulumi.StringPtrInput
	// Optional. Job is a PySpark job.
	PysparkJob PySparkJobPtrInput
	// Optional. The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a job_id.
	Reference JobReferencePtrInput
	Region    pulumi.StringInput
	// Optional. A unique id used to identify the request. If the server receives two SubmitJobRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.SubmitJobRequest)s with the same id, then the second request will be ignored and the first Job created and stored in the backend is returned. It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier). The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.
	RequestId pulumi.StringPtrInput
	// Optional. Job scheduling configuration.
	Scheduling JobSchedulingPtrInput
	// Optional. Job is a Spark job.
	SparkJob SparkJobPtrInput
	// Optional. Job is a SparkR job.
	SparkRJob SparkRJobPtrInput
	// Optional. Job is a SparkSql job.
	SparkSqlJob SparkSqlJobPtrInput
	// Optional. Job is a Trino job.
	TrinoJob TrinoJobPtrInput
}

The set of arguments for constructing a Job resource.

func (JobArgs) ElementType added in v0.3.0

func (JobArgs) ElementType() reflect.Type

type JobInput added in v0.3.0

type JobInput interface {
	pulumi.Input

	ToJobOutput() JobOutput
	ToJobOutputWithContext(ctx context.Context) JobOutput
}

type JobOutput added in v0.3.0

type JobOutput struct{ *pulumi.OutputState }

func (JobOutput) Done added in v0.19.0

func (o JobOutput) Done() pulumi.BoolOutput

Indicates whether the job is completed. If the value is false, the job is still in progress. If true, the job is completed, and status.state field will indicate if it was successful, failed, or cancelled.

func (JobOutput) DriverControlFilesUri added in v0.19.0

func (o JobOutput) DriverControlFilesUri() pulumi.StringOutput

If present, the location of miscellaneous control files which can be used as part of job setup and handling. If not present, control files might be placed in the same location as driver_output_uri.

func (JobOutput) DriverOutputResourceUri added in v0.19.0

func (o JobOutput) DriverOutputResourceUri() pulumi.StringOutput

A URI pointing to the location of the stdout of the job's driver program.

func (JobOutput) DriverSchedulingConfig added in v0.28.0

func (o JobOutput) DriverSchedulingConfig() DriverSchedulingConfigResponseOutput

Optional. Driver scheduling configuration.

func (JobOutput) ElementType added in v0.3.0

func (JobOutput) ElementType() reflect.Type

func (JobOutput) FlinkJob added in v0.32.0

func (o JobOutput) FlinkJob() FlinkJobResponseOutput

Optional. Job is a Flink job.

func (JobOutput) HadoopJob added in v0.19.0

func (o JobOutput) HadoopJob() HadoopJobResponseOutput

Optional. Job is a Hadoop job.

func (JobOutput) HiveJob added in v0.19.0

func (o JobOutput) HiveJob() HiveJobResponseOutput

Optional. Job is a Hive job.

func (JobOutput) JobUuid added in v0.19.0

func (o JobOutput) JobUuid() pulumi.StringOutput

A UUID that uniquely identifies a job within the project over time. This is in contrast to a user-settable reference.job_id that might be reused over time.

func (JobOutput) Labels added in v0.19.0

func (o JobOutput) Labels() pulumi.StringMapOutput

Optional. The labels to associate with this job. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a job.

func (JobOutput) PigJob added in v0.19.0

func (o JobOutput) PigJob() PigJobResponseOutput

Optional. Job is a Pig job.

func (JobOutput) Placement added in v0.19.0

func (o JobOutput) Placement() JobPlacementResponseOutput

Job information, including how, when, and where to run the job.

func (JobOutput) PrestoJob added in v0.19.0

func (o JobOutput) PrestoJob() PrestoJobResponseOutput

Optional. Job is a Presto job.

func (JobOutput) Project added in v0.21.0

func (o JobOutput) Project() pulumi.StringOutput

func (JobOutput) PysparkJob added in v0.19.0

func (o JobOutput) PysparkJob() PySparkJobResponseOutput

Optional. Job is a PySpark job.

func (JobOutput) Reference added in v0.19.0

func (o JobOutput) Reference() JobReferenceResponseOutput

Optional. The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a job_id.

func (JobOutput) Region added in v0.21.0

func (o JobOutput) Region() pulumi.StringOutput

func (JobOutput) Scheduling added in v0.19.0

func (o JobOutput) Scheduling() JobSchedulingResponseOutput

Optional. Job scheduling configuration.

func (JobOutput) SparkJob added in v0.19.0

func (o JobOutput) SparkJob() SparkJobResponseOutput

Optional. Job is a Spark job.

func (JobOutput) SparkRJob added in v0.19.0

func (o JobOutput) SparkRJob() SparkRJobResponseOutput

Optional. Job is a SparkR job.

func (JobOutput) SparkSqlJob added in v0.19.0

func (o JobOutput) SparkSqlJob() SparkSqlJobResponseOutput

Optional. Job is a SparkSql job.

func (JobOutput) Status added in v0.19.0

The job status. Additional application-specific status information might be contained in the type_job and yarn_applications fields.

func (JobOutput) StatusHistory added in v0.19.0

func (o JobOutput) StatusHistory() JobStatusResponseArrayOutput

The previous job status.

func (JobOutput) ToJobOutput added in v0.3.0

func (o JobOutput) ToJobOutput() JobOutput

func (JobOutput) ToJobOutputWithContext added in v0.3.0

func (o JobOutput) ToJobOutputWithContext(ctx context.Context) JobOutput

func (JobOutput) TrinoJob added in v0.26.0

func (o JobOutput) TrinoJob() TrinoJobResponseOutput

Optional. Job is a Trino job.

func (JobOutput) YarnApplications added in v0.19.0

func (o JobOutput) YarnApplications() YarnApplicationResponseArrayOutput

The collection of YARN applications spun up by this job. Beta Feature: This report is available for testing purposes only. It might be changed before final release.

type JobPlacement

type JobPlacement struct {
	// Optional. Cluster labels to identify a cluster where the job will be submitted.
	ClusterLabels map[string]string `pulumi:"clusterLabels"`
	// The name of the cluster where the job will be submitted.
	ClusterName string `pulumi:"clusterName"`
}

Dataproc job config.

type JobPlacementArgs

type JobPlacementArgs struct {
	// Optional. Cluster labels to identify a cluster where the job will be submitted.
	ClusterLabels pulumi.StringMapInput `pulumi:"clusterLabels"`
	// The name of the cluster where the job will be submitted.
	ClusterName pulumi.StringInput `pulumi:"clusterName"`
}

Dataproc job config.

func (JobPlacementArgs) ElementType

func (JobPlacementArgs) ElementType() reflect.Type

func (JobPlacementArgs) ToJobPlacementOutput

func (i JobPlacementArgs) ToJobPlacementOutput() JobPlacementOutput

func (JobPlacementArgs) ToJobPlacementOutputWithContext

func (i JobPlacementArgs) ToJobPlacementOutputWithContext(ctx context.Context) JobPlacementOutput

type JobPlacementInput

type JobPlacementInput interface {
	pulumi.Input

	ToJobPlacementOutput() JobPlacementOutput
	ToJobPlacementOutputWithContext(context.Context) JobPlacementOutput
}

JobPlacementInput is an input type that accepts JobPlacementArgs and JobPlacementOutput values. You can construct a concrete instance of `JobPlacementInput` via:

JobPlacementArgs{...}

type JobPlacementOutput

type JobPlacementOutput struct{ *pulumi.OutputState }

Dataproc job config.

func (JobPlacementOutput) ClusterLabels

func (o JobPlacementOutput) ClusterLabels() pulumi.StringMapOutput

Optional. Cluster labels to identify a cluster where the job will be submitted.

func (JobPlacementOutput) ClusterName

func (o JobPlacementOutput) ClusterName() pulumi.StringOutput

The name of the cluster where the job will be submitted.

func (JobPlacementOutput) ElementType

func (JobPlacementOutput) ElementType() reflect.Type

func (JobPlacementOutput) ToJobPlacementOutput

func (o JobPlacementOutput) ToJobPlacementOutput() JobPlacementOutput

func (JobPlacementOutput) ToJobPlacementOutputWithContext

func (o JobPlacementOutput) ToJobPlacementOutputWithContext(ctx context.Context) JobPlacementOutput

type JobPlacementResponse

type JobPlacementResponse struct {
	// Optional. Cluster labels to identify a cluster where the job will be submitted.
	ClusterLabels map[string]string `pulumi:"clusterLabels"`
	// The name of the cluster where the job will be submitted.
	ClusterName string `pulumi:"clusterName"`
	// A cluster UUID generated by the Dataproc service when the job is submitted.
	ClusterUuid string `pulumi:"clusterUuid"`
}

Dataproc job config.

type JobPlacementResponseOutput

type JobPlacementResponseOutput struct{ *pulumi.OutputState }

Dataproc job config.

func (JobPlacementResponseOutput) ClusterLabels

Optional. Cluster labels to identify a cluster where the job will be submitted.

func (JobPlacementResponseOutput) ClusterName

The name of the cluster where the job will be submitted.

func (JobPlacementResponseOutput) ClusterUuid

A cluster UUID generated by the Dataproc service when the job is submitted.

func (JobPlacementResponseOutput) ElementType

func (JobPlacementResponseOutput) ElementType() reflect.Type

func (JobPlacementResponseOutput) ToJobPlacementResponseOutput

func (o JobPlacementResponseOutput) ToJobPlacementResponseOutput() JobPlacementResponseOutput

func (JobPlacementResponseOutput) ToJobPlacementResponseOutputWithContext

func (o JobPlacementResponseOutput) ToJobPlacementResponseOutputWithContext(ctx context.Context) JobPlacementResponseOutput

type JobReference

type JobReference struct {
	// Optional. The job ID, which must be unique within the project. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The maximum length is 100 characters. If not specified by the caller, the job ID will be provided by the server.
	JobId *string `pulumi:"jobId"`
	// Optional. The ID of the Google Cloud Platform project that the job belongs to. If specified, must match the request project ID.
	Project *string `pulumi:"project"`
}

Encapsulates the full scoping used to reference a job.

type JobReferenceArgs

type JobReferenceArgs struct {
	// Optional. The job ID, which must be unique within the project. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The maximum length is 100 characters. If not specified by the caller, the job ID will be provided by the server.
	JobId pulumi.StringPtrInput `pulumi:"jobId"`
	// Optional. The ID of the Google Cloud Platform project that the job belongs to. If specified, must match the request project ID.
	Project pulumi.StringPtrInput `pulumi:"project"`
}

Encapsulates the full scoping used to reference a job.
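
If a deterministic, caller-chosen job ID is preferred over a server-generated one, a JobReferenceArgs can be built and passed as the Reference field of JobArgs; a brief fragment (the ID value is a placeholder):

// Sketch: letters, numbers, underscores, and hyphens only; at most 100
// characters. Omit JobId to let the server generate one.
ref := dataproc.JobReferenceArgs{
	JobId: pulumi.String("nightly-aggregation-2023-11-29"),
}
_ = ref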

func (JobReferenceArgs) ElementType

func (JobReferenceArgs) ElementType() reflect.Type

func (JobReferenceArgs) ToJobReferenceOutput

func (i JobReferenceArgs) ToJobReferenceOutput() JobReferenceOutput

func (JobReferenceArgs) ToJobReferenceOutputWithContext

func (i JobReferenceArgs) ToJobReferenceOutputWithContext(ctx context.Context) JobReferenceOutput

func (JobReferenceArgs) ToJobReferencePtrOutput

func (i JobReferenceArgs) ToJobReferencePtrOutput() JobReferencePtrOutput

func (JobReferenceArgs) ToJobReferencePtrOutputWithContext

func (i JobReferenceArgs) ToJobReferencePtrOutputWithContext(ctx context.Context) JobReferencePtrOutput

type JobReferenceInput

type JobReferenceInput interface {
	pulumi.Input

	ToJobReferenceOutput() JobReferenceOutput
	ToJobReferenceOutputWithContext(context.Context) JobReferenceOutput
}

JobReferenceInput is an input type that accepts JobReferenceArgs and JobReferenceOutput values. You can construct a concrete instance of `JobReferenceInput` via:

JobReferenceArgs{...}

type JobReferenceOutput

type JobReferenceOutput struct{ *pulumi.OutputState }

Encapsulates the full scoping used to reference a job.

func (JobReferenceOutput) ElementType

func (JobReferenceOutput) ElementType() reflect.Type

func (JobReferenceOutput) JobId

Optional. The job ID, which must be unique within the project. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The maximum length is 100 characters. If not specified by the caller, the job ID will be provided by the server.

func (JobReferenceOutput) Project added in v0.3.0

Optional. The ID of the Google Cloud Platform project that the job belongs to. If specified, must match the request project ID.

func (JobReferenceOutput) ToJobReferenceOutput

func (o JobReferenceOutput) ToJobReferenceOutput() JobReferenceOutput

func (JobReferenceOutput) ToJobReferenceOutputWithContext

func (o JobReferenceOutput) ToJobReferenceOutputWithContext(ctx context.Context) JobReferenceOutput

func (JobReferenceOutput) ToJobReferencePtrOutput

func (o JobReferenceOutput) ToJobReferencePtrOutput() JobReferencePtrOutput

func (JobReferenceOutput) ToJobReferencePtrOutputWithContext

func (o JobReferenceOutput) ToJobReferencePtrOutputWithContext(ctx context.Context) JobReferencePtrOutput

type JobReferencePtrInput

type JobReferencePtrInput interface {
	pulumi.Input

	ToJobReferencePtrOutput() JobReferencePtrOutput
	ToJobReferencePtrOutputWithContext(context.Context) JobReferencePtrOutput
}

JobReferencePtrInput is an input type that accepts JobReferenceArgs, JobReferencePtr and JobReferencePtrOutput values. You can construct a concrete instance of `JobReferencePtrInput` via:

        JobReferenceArgs{...}

or:

        nil

type JobReferencePtrOutput

type JobReferencePtrOutput struct{ *pulumi.OutputState }

func (JobReferencePtrOutput) Elem

func (JobReferencePtrOutput) ElementType

func (JobReferencePtrOutput) ElementType() reflect.Type

func (JobReferencePtrOutput) JobId

Optional. The job ID, which must be unique within the project. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The maximum length is 100 characters. If not specified by the caller, the job ID will be provided by the server.

func (JobReferencePtrOutput) Project added in v0.3.0

Optional. The ID of the Google Cloud Platform project that the job belongs to. If specified, must match the request project ID.

func (JobReferencePtrOutput) ToJobReferencePtrOutput

func (o JobReferencePtrOutput) ToJobReferencePtrOutput() JobReferencePtrOutput

func (JobReferencePtrOutput) ToJobReferencePtrOutputWithContext

func (o JobReferencePtrOutput) ToJobReferencePtrOutputWithContext(ctx context.Context) JobReferencePtrOutput

type JobReferenceResponse

type JobReferenceResponse struct {
	// Optional. The job ID, which must be unique within the project. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The maximum length is 100 characters. If not specified by the caller, the job ID will be provided by the server.
	JobId string `pulumi:"jobId"`
	// Optional. The ID of the Google Cloud Platform project that the job belongs to. If specified, must match the request project ID.
	Project string `pulumi:"project"`
}

Encapsulates the full scoping used to reference a job.

type JobReferenceResponseOutput

type JobReferenceResponseOutput struct{ *pulumi.OutputState }

Encapsulates the full scoping used to reference a job.

func (JobReferenceResponseOutput) ElementType

func (JobReferenceResponseOutput) ElementType() reflect.Type

func (JobReferenceResponseOutput) JobId

Optional. The job ID, which must be unique within the project. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The maximum length is 100 characters. If not specified by the caller, the job ID will be provided by the server.

func (JobReferenceResponseOutput) Project added in v0.3.0

Optional. The ID of the Google Cloud Platform project that the job belongs to. If specified, must match the request project ID.

func (JobReferenceResponseOutput) ToJobReferenceResponseOutput

func (o JobReferenceResponseOutput) ToJobReferenceResponseOutput() JobReferenceResponseOutput

func (JobReferenceResponseOutput) ToJobReferenceResponseOutputWithContext

func (o JobReferenceResponseOutput) ToJobReferenceResponseOutputWithContext(ctx context.Context) JobReferenceResponseOutput

type JobScheduling

type JobScheduling struct {
	// Optional. Maximum number of times per hour a driver can be restarted as a result of the driver exiting with a non-zero code before the job is reported failed. A job might be reported as thrashing if the driver exits with a non-zero code four times within a 10-minute window. Maximum value is 10. Note: This restartable job option is not supported in Dataproc workflow templates (https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template).
	MaxFailuresPerHour *int `pulumi:"maxFailuresPerHour"`
	// Optional. Maximum total number of times a driver can be restarted as a result of the driver exiting with a non-zero code. After the maximum number is reached, the job will be reported as failed. Maximum value is 240. Note: Currently, this restartable job option is not supported in Dataproc workflow templates (https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template).
	MaxFailuresTotal *int `pulumi:"maxFailuresTotal"`
}

Job scheduling options.

type JobSchedulingArgs

type JobSchedulingArgs struct {
	// Optional. Maximum number of times per hour a driver can be restarted as a result of the driver exiting with a non-zero code before the job is reported failed. A job might be reported as thrashing if the driver exits with a non-zero code four times within a 10-minute window. Maximum value is 10. Note: This restartable job option is not supported in Dataproc workflow templates (https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template).
	MaxFailuresPerHour pulumi.IntPtrInput `pulumi:"maxFailuresPerHour"`
	// Optional. Maximum total number of times a driver can be restarted as a result of the driver exiting with a non-zero code. After the maximum number is reached, the job will be reported as failed. Maximum value is 240. Note: Currently, this restartable job option is not supported in Dataproc workflow templates (https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template).
	MaxFailuresTotal pulumi.IntPtrInput `pulumi:"maxFailuresTotal"`
}

Job scheduling options.
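For example, to bound driver restarts both per hour and in total, the two limits can be set together. A sketch with illustrative values (each within the documented maxima of 10 and 240), assuming the core pulumi SDK is imported:

scheduling := JobSchedulingArgs{
	// Allow up to 5 restarts per hour and 20 restarts overall.
	MaxFailuresPerHour: pulumi.Int(5),
	MaxFailuresTotal:   pulumi.Int(20),
}

The value would then be supplied to the scheduling field of a job defined elsewhere with this package.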

func (JobSchedulingArgs) ElementType

func (JobSchedulingArgs) ElementType() reflect.Type

func (JobSchedulingArgs) ToJobSchedulingOutput

func (i JobSchedulingArgs) ToJobSchedulingOutput() JobSchedulingOutput

func (JobSchedulingArgs) ToJobSchedulingOutputWithContext

func (i JobSchedulingArgs) ToJobSchedulingOutputWithContext(ctx context.Context) JobSchedulingOutput

func (JobSchedulingArgs) ToJobSchedulingPtrOutput

func (i JobSchedulingArgs) ToJobSchedulingPtrOutput() JobSchedulingPtrOutput

func (JobSchedulingArgs) ToJobSchedulingPtrOutputWithContext

func (i JobSchedulingArgs) ToJobSchedulingPtrOutputWithContext(ctx context.Context) JobSchedulingPtrOutput

type JobSchedulingInput

type JobSchedulingInput interface {
	pulumi.Input

	ToJobSchedulingOutput() JobSchedulingOutput
	ToJobSchedulingOutputWithContext(context.Context) JobSchedulingOutput
}

JobSchedulingInput is an input type that accepts JobSchedulingArgs and JobSchedulingOutput values. You can construct a concrete instance of `JobSchedulingInput` via:

JobSchedulingArgs{...}

type JobSchedulingOutput

type JobSchedulingOutput struct{ *pulumi.OutputState }

Job scheduling options.

func (JobSchedulingOutput) ElementType

func (JobSchedulingOutput) ElementType() reflect.Type

func (JobSchedulingOutput) MaxFailuresPerHour

func (o JobSchedulingOutput) MaxFailuresPerHour() pulumi.IntPtrOutput

Optional. Maximum number of times per hour a driver can be restarted as a result of the driver exiting with a non-zero code before the job is reported failed. A job might be reported as thrashing if the driver exits with a non-zero code four times within a 10-minute window. Maximum value is 10. Note: This restartable job option is not supported in Dataproc workflow templates (https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template).

func (JobSchedulingOutput) MaxFailuresTotal

func (o JobSchedulingOutput) MaxFailuresTotal() pulumi.IntPtrOutput

Optional. Maximum total number of times a driver can be restarted as a result of the driver exiting with a non-zero code. After the maximum number is reached, the job will be reported as failed. Maximum value is 240. Note: Currently, this restartable job option is not supported in Dataproc workflow templates (https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template).

func (JobSchedulingOutput) ToJobSchedulingOutput

func (o JobSchedulingOutput) ToJobSchedulingOutput() JobSchedulingOutput

func (JobSchedulingOutput) ToJobSchedulingOutputWithContext

func (o JobSchedulingOutput) ToJobSchedulingOutputWithContext(ctx context.Context) JobSchedulingOutput

func (JobSchedulingOutput) ToJobSchedulingPtrOutput

func (o JobSchedulingOutput) ToJobSchedulingPtrOutput() JobSchedulingPtrOutput

func (JobSchedulingOutput) ToJobSchedulingPtrOutputWithContext

func (o JobSchedulingOutput) ToJobSchedulingPtrOutputWithContext(ctx context.Context) JobSchedulingPtrOutput

type JobSchedulingPtrInput

type JobSchedulingPtrInput interface {
	pulumi.Input

	ToJobSchedulingPtrOutput() JobSchedulingPtrOutput
	ToJobSchedulingPtrOutputWithContext(context.Context) JobSchedulingPtrOutput
}

JobSchedulingPtrInput is an input type that accepts JobSchedulingArgs, JobSchedulingPtr and JobSchedulingPtrOutput values. You can construct a concrete instance of `JobSchedulingPtrInput` via:

        JobSchedulingArgs{...}

or:

        nil

type JobSchedulingPtrOutput

type JobSchedulingPtrOutput struct{ *pulumi.OutputState }

func (JobSchedulingPtrOutput) Elem

func (JobSchedulingPtrOutput) ElementType

func (JobSchedulingPtrOutput) ElementType() reflect.Type

func (JobSchedulingPtrOutput) MaxFailuresPerHour

func (o JobSchedulingPtrOutput) MaxFailuresPerHour() pulumi.IntPtrOutput

Optional. Maximum number of times per hour a driver can be restarted as a result of the driver exiting with a non-zero code before the job is reported failed. A job might be reported as thrashing if the driver exits with a non-zero code four times within a 10-minute window. Maximum value is 10. Note: This restartable job option is not supported in Dataproc workflow templates (https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template).

func (JobSchedulingPtrOutput) MaxFailuresTotal

func (o JobSchedulingPtrOutput) MaxFailuresTotal() pulumi.IntPtrOutput

Optional. Maximum total number of times a driver can be restarted as a result of the driver exiting with a non-zero code. After the maximum number is reached, the job will be reported as failed. Maximum value is 240. Note: Currently, this restartable job option is not supported in Dataproc workflow templates (https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template).

func (JobSchedulingPtrOutput) ToJobSchedulingPtrOutput

func (o JobSchedulingPtrOutput) ToJobSchedulingPtrOutput() JobSchedulingPtrOutput

func (JobSchedulingPtrOutput) ToJobSchedulingPtrOutputWithContext

func (o JobSchedulingPtrOutput) ToJobSchedulingPtrOutputWithContext(ctx context.Context) JobSchedulingPtrOutput

type JobSchedulingResponse

type JobSchedulingResponse struct {
	// Optional. Maximum number of times per hour a driver can be restarted as a result of the driver exiting with a non-zero code before the job is reported failed. A job might be reported as thrashing if the driver exits with a non-zero code four times within a 10-minute window. Maximum value is 10. Note: This restartable job option is not supported in Dataproc workflow templates (https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template).
	MaxFailuresPerHour int `pulumi:"maxFailuresPerHour"`
	// Optional. Maximum total number of times a driver can be restarted as a result of the driver exiting with a non-zero code. After the maximum number is reached, the job will be reported as failed. Maximum value is 240. Note: Currently, this restartable job option is not supported in Dataproc workflow templates (https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template).
	MaxFailuresTotal int `pulumi:"maxFailuresTotal"`
}

Job scheduling options.

type JobSchedulingResponseOutput

type JobSchedulingResponseOutput struct{ *pulumi.OutputState }

Job scheduling options.

func (JobSchedulingResponseOutput) ElementType

func (JobSchedulingResponseOutput) MaxFailuresPerHour

func (o JobSchedulingResponseOutput) MaxFailuresPerHour() pulumi.IntOutput

Optional. Maximum number of times per hour a driver can be restarted as a result of the driver exiting with a non-zero code before the job is reported failed. A job might be reported as thrashing if the driver exits with a non-zero code four times within a 10-minute window. Maximum value is 10. Note: This restartable job option is not supported in Dataproc workflow templates (https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template).

func (JobSchedulingResponseOutput) MaxFailuresTotal

func (o JobSchedulingResponseOutput) MaxFailuresTotal() pulumi.IntOutput

Optional. Maximum total number of times a driver can be restarted as a result of the driver exiting with a non-zero code. After the maximum number is reached, the job will be reported as failed. Maximum value is 240. Note: Currently, this restartable job option is not supported in Dataproc workflow templates (https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template).

func (JobSchedulingResponseOutput) ToJobSchedulingResponseOutput

func (o JobSchedulingResponseOutput) ToJobSchedulingResponseOutput() JobSchedulingResponseOutput

func (JobSchedulingResponseOutput) ToJobSchedulingResponseOutputWithContext

func (o JobSchedulingResponseOutput) ToJobSchedulingResponseOutputWithContext(ctx context.Context) JobSchedulingResponseOutput

type JobState added in v0.3.0

type JobState struct {
}

func (JobState) ElementType added in v0.3.0

func (JobState) ElementType() reflect.Type

type JobStatusResponse

type JobStatusResponse struct {
	// Optional. Output only. Job state details, such as an error description if the state is ERROR.
	Details string `pulumi:"details"`
	// A state message specifying the overall job state.
	State string `pulumi:"state"`
	// The time when this state was entered.
	StateStartTime string `pulumi:"stateStartTime"`
	// Additional state information, which includes status reported by the agent.
	Substate string `pulumi:"substate"`
}

Dataproc job status.
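Response types are read-only and expose their fields through the generated getters on the corresponding output type. A short sketch, assuming ctx is the *pulumi.Context passed to pulumi.Run and status is a JobStatusResponseOutput obtained elsewhere in the program:

// Each getter returns a pulumi.StringOutput that resolves during deployment.
ctx.Export("jobState", status.State())
ctx.Export("jobStateDetails", status.Details())
ctx.Export("jobStateSince", status.StateStartTime())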

type JobStatusResponseArrayOutput

type JobStatusResponseArrayOutput struct{ *pulumi.OutputState }

func (JobStatusResponseArrayOutput) ElementType

func (JobStatusResponseArrayOutput) Index

func (JobStatusResponseArrayOutput) ToJobStatusResponseArrayOutput

func (o JobStatusResponseArrayOutput) ToJobStatusResponseArrayOutput() JobStatusResponseArrayOutput

func (JobStatusResponseArrayOutput) ToJobStatusResponseArrayOutputWithContext

func (o JobStatusResponseArrayOutput) ToJobStatusResponseArrayOutputWithContext(ctx context.Context) JobStatusResponseArrayOutput

type JobStatusResponseOutput

type JobStatusResponseOutput struct{ *pulumi.OutputState }

Dataproc job status.

func (JobStatusResponseOutput) Details

Optional. Output only. Job state details, such as an error description if the state is ERROR.

func (JobStatusResponseOutput) ElementType

func (JobStatusResponseOutput) ElementType() reflect.Type

func (JobStatusResponseOutput) State

A state message specifying the overall job state.

func (JobStatusResponseOutput) StateStartTime

func (o JobStatusResponseOutput) StateStartTime() pulumi.StringOutput

The time when this state was entered.

func (JobStatusResponseOutput) Substate

Additional state information, which includes status reported by the agent.

func (JobStatusResponseOutput) ToJobStatusResponseOutput

func (o JobStatusResponseOutput) ToJobStatusResponseOutput() JobStatusResponseOutput

func (JobStatusResponseOutput) ToJobStatusResponseOutputWithContext

func (o JobStatusResponseOutput) ToJobStatusResponseOutputWithContext(ctx context.Context) JobStatusResponseOutput

type JupyterConfig added in v0.32.0

type JupyterConfig struct {
	// Optional. Display name, shown in the Jupyter kernelspec card.
	DisplayName *string `pulumi:"displayName"`
	// Optional. Kernel
	Kernel *JupyterConfigKernel `pulumi:"kernel"`
}

Jupyter configuration for an interactive session.

type JupyterConfigArgs added in v0.32.0

type JupyterConfigArgs struct {
	// Optional. Display name, shown in the Jupyter kernelspec card.
	DisplayName pulumi.StringPtrInput `pulumi:"displayName"`
	// Optional. Kernel
	Kernel JupyterConfigKernelPtrInput `pulumi:"kernel"`
}

Jupyter configuration for an interactive session.
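The optional Kernel field takes the JupyterConfigKernel enum; an enum value or the JupyterConfigKernelPtr helper documented below both satisfy JupyterConfigKernelPtrInput. A sketch in which "PYTHON" is assumed to be one of the API's accepted kernel names:

jupyter := JupyterConfigArgs{
	DisplayName: pulumi.String("PySpark notebook"),
	// "PYTHON" is an assumed kernel value; see the JupyterConfigKernel constants for the accepted set.
	Kernel: JupyterConfigKernelPtr("PYTHON"),
}

The value would then be supplied to the Jupyter configuration field of an interactive session defined elsewhere with this package.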

func (JupyterConfigArgs) ElementType added in v0.32.0

func (JupyterConfigArgs) ElementType() reflect.Type

func (JupyterConfigArgs) ToJupyterConfigOutput added in v0.32.0

func (i JupyterConfigArgs) ToJupyterConfigOutput() JupyterConfigOutput

func (JupyterConfigArgs) ToJupyterConfigOutputWithContext added in v0.32.0

func (i JupyterConfigArgs) ToJupyterConfigOutputWithContext(ctx context.Context) JupyterConfigOutput

func (JupyterConfigArgs) ToJupyterConfigPtrOutput added in v0.32.0

func (i JupyterConfigArgs) ToJupyterConfigPtrOutput() JupyterConfigPtrOutput

func (JupyterConfigArgs) ToJupyterConfigPtrOutputWithContext added in v0.32.0

func (i JupyterConfigArgs) ToJupyterConfigPtrOutputWithContext(ctx context.Context) JupyterConfigPtrOutput

type JupyterConfigInput added in v0.32.0

type JupyterConfigInput interface {
	pulumi.Input

	ToJupyterConfigOutput() JupyterConfigOutput
	ToJupyterConfigOutputWithContext(context.Context) JupyterConfigOutput
}

JupyterConfigInput is an input type that accepts JupyterConfigArgs and JupyterConfigOutput values. You can construct a concrete instance of `JupyterConfigInput` via:

JupyterConfigArgs{...}

type JupyterConfigKernel added in v0.32.0

type JupyterConfigKernel string

Optional. Kernel

func (JupyterConfigKernel) ElementType added in v0.32.0

func (JupyterConfigKernel) ElementType() reflect.Type

func (JupyterConfigKernel) ToJupyterConfigKernelOutput added in v0.32.0

func (e JupyterConfigKernel) ToJupyterConfigKernelOutput() JupyterConfigKernelOutput

func (JupyterConfigKernel) ToJupyterConfigKernelOutputWithContext added in v0.32.0

func (e JupyterConfigKernel) ToJupyterConfigKernelOutputWithContext(ctx context.Context) JupyterConfigKernelOutput

func (JupyterConfigKernel) ToJupyterConfigKernelPtrOutput added in v0.32.0

func (e JupyterConfigKernel) ToJupyterConfigKernelPtrOutput() JupyterConfigKernelPtrOutput

func (JupyterConfigKernel) ToJupyterConfigKernelPtrOutputWithContext added in v0.32.0

func (e JupyterConfigKernel) ToJupyterConfigKernelPtrOutputWithContext(ctx context.Context) JupyterConfigKernelPtrOutput

func (JupyterConfigKernel) ToStringOutput added in v0.32.0

func (e JupyterConfigKernel) ToStringOutput() pulumi.StringOutput

func (JupyterConfigKernel) ToStringOutputWithContext added in v0.32.0

func (e JupyterConfigKernel) ToStringOutputWithContext(ctx context.Context) pulumi.StringOutput

func (JupyterConfigKernel) ToStringPtrOutput added in v0.32.0

func (e JupyterConfigKernel) ToStringPtrOutput() pulumi.StringPtrOutput

func (JupyterConfigKernel) ToStringPtrOutputWithContext added in v0.32.0

func (e JupyterConfigKernel) ToStringPtrOutputWithContext(ctx context.Context) pulumi.StringPtrOutput

type JupyterConfigKernelInput added in v0.32.0

type JupyterConfigKernelInput interface {
	pulumi.Input

	ToJupyterConfigKernelOutput() JupyterConfigKernelOutput
	ToJupyterConfigKernelOutputWithContext(context.Context) JupyterConfigKernelOutput
}

JupyterConfigKernelInput is an input type that accepts JupyterConfigKernelArgs and JupyterConfigKernelOutput values. You can construct a concrete instance of `JupyterConfigKernelInput` via:

JupyterConfigKernelArgs{...}

type JupyterConfigKernelOutput added in v0.32.0

type JupyterConfigKernelOutput struct{ *pulumi.OutputState }

func (JupyterConfigKernelOutput) ElementType added in v0.32.0

func (JupyterConfigKernelOutput) ElementType() reflect.Type

func (JupyterConfigKernelOutput) ToJupyterConfigKernelOutput added in v0.32.0

func (o JupyterConfigKernelOutput) ToJupyterConfigKernelOutput() JupyterConfigKernelOutput

func (JupyterConfigKernelOutput) ToJupyterConfigKernelOutputWithContext added in v0.32.0

func (o JupyterConfigKernelOutput) ToJupyterConfigKernelOutputWithContext(ctx context.Context) JupyterConfigKernelOutput

func (JupyterConfigKernelOutput) ToJupyterConfigKernelPtrOutput added in v0.32.0

func (o JupyterConfigKernelOutput) ToJupyterConfigKernelPtrOutput() JupyterConfigKernelPtrOutput

func (JupyterConfigKernelOutput) ToJupyterConfigKernelPtrOutputWithContext added in v0.32.0

func (o JupyterConfigKernelOutput) ToJupyterConfigKernelPtrOutputWithContext(ctx context.Context) JupyterConfigKernelPtrOutput

func (JupyterConfigKernelOutput) ToStringOutput added in v0.32.0

func (o JupyterConfigKernelOutput) ToStringOutput() pulumi.StringOutput

func (JupyterConfigKernelOutput) ToStringOutputWithContext added in v0.32.0

func (o JupyterConfigKernelOutput) ToStringOutputWithContext(ctx context.Context) pulumi.StringOutput

func (JupyterConfigKernelOutput) ToStringPtrOutput added in v0.32.0

func (o JupyterConfigKernelOutput) ToStringPtrOutput() pulumi.StringPtrOutput

func (JupyterConfigKernelOutput) ToStringPtrOutputWithContext added in v0.32.0

func (o JupyterConfigKernelOutput) ToStringPtrOutputWithContext(ctx context.Context) pulumi.StringPtrOutput

type JupyterConfigKernelPtrInput added in v0.32.0

type JupyterConfigKernelPtrInput interface {
	pulumi.Input

	ToJupyterConfigKernelPtrOutput() JupyterConfigKernelPtrOutput
	ToJupyterConfigKernelPtrOutputWithContext(context.Context) JupyterConfigKernelPtrOutput
}

func JupyterConfigKernelPtr added in v0.32.0

func JupyterConfigKernelPtr(v string) JupyterConfigKernelPtrInput

type JupyterConfigKernelPtrOutput added in v0.32.0

type JupyterConfigKernelPtrOutput struct{ *pulumi.OutputState }

func (JupyterConfigKernelPtrOutput) Elem added in v0.32.0

func (JupyterConfigKernelPtrOutput) ElementType added in v0.32.0

func (JupyterConfigKernelPtrOutput) ToJupyterConfigKernelPtrOutput added in v0.32.0

func (o JupyterConfigKernelPtrOutput) ToJupyterConfigKernelPtrOutput() JupyterConfigKernelPtrOutput

func (JupyterConfigKernelPtrOutput) ToJupyterConfigKernelPtrOutputWithContext added in v0.32.0

func (o JupyterConfigKernelPtrOutput) ToJupyterConfigKernelPtrOutputWithContext(ctx context.Context) JupyterConfigKernelPtrOutput

func (JupyterConfigKernelPtrOutput) ToStringPtrOutput added in v0.32.0

func (o JupyterConfigKernelPtrOutput) ToStringPtrOutput() pulumi.StringPtrOutput

func (JupyterConfigKernelPtrOutput) ToStringPtrOutputWithContext added in v0.32.0

func (o JupyterConfigKernelPtrOutput) ToStringPtrOutputWithContext(ctx context.Context) pulumi.StringPtrOutput

type JupyterConfigOutput added in v0.32.0

type JupyterConfigOutput struct{ *pulumi.OutputState }

Jupyter configuration for an interactive session.

func (JupyterConfigOutput) DisplayName added in v0.32.0

func (o JupyterConfigOutput) DisplayName() pulumi.StringPtrOutput

Optional. Display name, shown in the Jupyter kernelspec card.

func (JupyterConfigOutput) ElementType added in v0.32.0

func (JupyterConfigOutput) ElementType() reflect.Type

func (JupyterConfigOutput) Kernel added in v0.32.0

Optional. Kernel

func (JupyterConfigOutput) ToJupyterConfigOutput added in v0.32.0

func (o JupyterConfigOutput) ToJupyterConfigOutput() JupyterConfigOutput

func (JupyterConfigOutput) ToJupyterConfigOutputWithContext added in v0.32.0

func (o JupyterConfigOutput) ToJupyterConfigOutputWithContext(ctx context.Context) JupyterConfigOutput

func (JupyterConfigOutput) ToJupyterConfigPtrOutput added in v0.32.0

func (o JupyterConfigOutput) ToJupyterConfigPtrOutput() JupyterConfigPtrOutput

func (JupyterConfigOutput) ToJupyterConfigPtrOutputWithContext added in v0.32.0

func (o JupyterConfigOutput) ToJupyterConfigPtrOutputWithContext(ctx context.Context) JupyterConfigPtrOutput

type JupyterConfigPtrInput added in v0.32.0

type JupyterConfigPtrInput interface {
	pulumi.Input

	ToJupyterConfigPtrOutput() JupyterConfigPtrOutput
	ToJupyterConfigPtrOutputWithContext(context.Context) JupyterConfigPtrOutput
}

JupyterConfigPtrInput is an input type that accepts JupyterConfigArgs, JupyterConfigPtr and JupyterConfigPtrOutput values. You can construct a concrete instance of `JupyterConfigPtrInput` via:

        JupyterConfigArgs{...}

or:

        nil

func JupyterConfigPtr added in v0.32.0

func JupyterConfigPtr(v *JupyterConfigArgs) JupyterConfigPtrInput

type JupyterConfigPtrOutput added in v0.32.0

type JupyterConfigPtrOutput struct{ *pulumi.OutputState }

func (JupyterConfigPtrOutput) DisplayName added in v0.32.0

Optional. Display name, shown in the Jupyter kernelspec card.

func (JupyterConfigPtrOutput) Elem added in v0.32.0

func (JupyterConfigPtrOutput) ElementType added in v0.32.0

func (JupyterConfigPtrOutput) ElementType() reflect.Type

func (JupyterConfigPtrOutput) Kernel added in v0.32.0

Optional. Kernel

func (JupyterConfigPtrOutput) ToJupyterConfigPtrOutput added in v0.32.0

func (o JupyterConfigPtrOutput) ToJupyterConfigPtrOutput() JupyterConfigPtrOutput

func (JupyterConfigPtrOutput) ToJupyterConfigPtrOutputWithContext added in v0.32.0

func (o JupyterConfigPtrOutput) ToJupyterConfigPtrOutputWithContext(ctx context.Context) JupyterConfigPtrOutput

type JupyterConfigResponse added in v0.32.0

type JupyterConfigResponse struct {
	// Optional. Display name, shown in the Jupyter kernelspec card.
	DisplayName string `pulumi:"displayName"`
	// Optional. Kernel
	Kernel string `pulumi:"kernel"`
}

Jupyter configuration for an interactive session.

type JupyterConfigResponseOutput added in v0.32.0

type JupyterConfigResponseOutput struct{ *pulumi.OutputState }

Jupyter configuration for an interactive session.

func (JupyterConfigResponseOutput) DisplayName added in v0.32.0

Optional. Display name, shown in the Jupyter kernelspec card.

func (JupyterConfigResponseOutput) ElementType added in v0.32.0

func (JupyterConfigResponseOutput) Kernel added in v0.32.0

Optional. Kernel

func (JupyterConfigResponseOutput) ToJupyterConfigResponseOutput added in v0.32.0

func (o JupyterConfigResponseOutput) ToJupyterConfigResponseOutput() JupyterConfigResponseOutput

func (JupyterConfigResponseOutput) ToJupyterConfigResponseOutputWithContext added in v0.32.0

func (o JupyterConfigResponseOutput) ToJupyterConfigResponseOutputWithContext(ctx context.Context) JupyterConfigResponseOutput

type KerberosConfig

type KerberosConfig struct {
	// Optional. The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship.
	CrossRealmTrustAdminServer *string `pulumi:"crossRealmTrustAdminServer"`
	// Optional. The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship.
	CrossRealmTrustKdc *string `pulumi:"crossRealmTrustKdc"`
	// Optional. The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust.
	CrossRealmTrustRealm *string `pulumi:"crossRealmTrustRealm"`
	// Optional. The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship.
	CrossRealmTrustSharedPasswordUri *string `pulumi:"crossRealmTrustSharedPasswordUri"`
	// Optional. Flag to indicate whether to Kerberize the cluster (default: false). Set this field to true to enable Kerberos on a cluster.
	EnableKerberos *bool `pulumi:"enableKerberos"`
	// Optional. The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database.
	KdcDbKeyUri *string `pulumi:"kdcDbKeyUri"`
	// Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc.
	KeyPasswordUri *string `pulumi:"keyPasswordUri"`
	// Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, this password is generated by Dataproc.
	KeystorePasswordUri *string `pulumi:"keystorePasswordUri"`
	// Optional. The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.
	KeystoreUri *string `pulumi:"keystoreUri"`
	// Optional. The URI of the KMS key used to encrypt various sensitive files.
	KmsKeyUri *string `pulumi:"kmsKeyUri"`
	// Optional. The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm.
	Realm *string `pulumi:"realm"`
	// Optional. The Cloud Storage URI of a KMS encrypted file containing the root principal password.
	RootPrincipalPasswordUri *string `pulumi:"rootPrincipalPasswordUri"`
	// Optional. The lifetime of the ticket-granting ticket, in hours. If not specified, or if the user specifies 0, the default value of 10 is used.
	TgtLifetimeHours *int `pulumi:"tgtLifetimeHours"`
	// Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc.
	TruststorePasswordUri *string `pulumi:"truststorePasswordUri"`
	// Optional. The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.
	TruststoreUri *string `pulumi:"truststoreUri"`
}

Specifies Kerberos related configuration.

type KerberosConfigArgs

type KerberosConfigArgs struct {
	// Optional. The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship.
	CrossRealmTrustAdminServer pulumi.StringPtrInput `pulumi:"crossRealmTrustAdminServer"`
	// Optional. The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship.
	CrossRealmTrustKdc pulumi.StringPtrInput `pulumi:"crossRealmTrustKdc"`
	// Optional. The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust.
	CrossRealmTrustRealm pulumi.StringPtrInput `pulumi:"crossRealmTrustRealm"`
	// Optional. The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship.
	CrossRealmTrustSharedPasswordUri pulumi.StringPtrInput `pulumi:"crossRealmTrustSharedPasswordUri"`
	// Optional. Flag to indicate whether to Kerberize the cluster (default: false). Set this field to true to enable Kerberos on a cluster.
	EnableKerberos pulumi.BoolPtrInput `pulumi:"enableKerberos"`
	// Optional. The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database.
	KdcDbKeyUri pulumi.StringPtrInput `pulumi:"kdcDbKeyUri"`
	// Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc.
	KeyPasswordUri pulumi.StringPtrInput `pulumi:"keyPasswordUri"`
	// Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, this password is generated by Dataproc.
	KeystorePasswordUri pulumi.StringPtrInput `pulumi:"keystorePasswordUri"`
	// Optional. The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.
	KeystoreUri pulumi.StringPtrInput `pulumi:"keystoreUri"`
	// Optional. The URI of the KMS key used to encrypt various sensitive files.
	KmsKeyUri pulumi.StringPtrInput `pulumi:"kmsKeyUri"`
	// Optional. The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm.
	Realm pulumi.StringPtrInput `pulumi:"realm"`
	// Optional. The Cloud Storage URI of a KMS encrypted file containing the root principal password.
	RootPrincipalPasswordUri pulumi.StringPtrInput `pulumi:"rootPrincipalPasswordUri"`
	// Optional. The lifetime of the ticket-granting ticket, in hours. If not specified, or if the user specifies 0, the default value of 10 is used.
	TgtLifetimeHours pulumi.IntPtrInput `pulumi:"tgtLifetimeHours"`
	// Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc.
	TruststorePasswordUri pulumi.StringPtrInput `pulumi:"truststorePasswordUri"`
	// Optional. The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.
	TruststoreUri pulumi.StringPtrInput `pulumi:"truststoreUri"`
}

Specifies Kerberos related configuration.
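Only the fields that apply to a given cluster need to be set; the rest can stay nil. A minimal sketch that enables Kerberos, with placeholder KMS and Cloud Storage URIs:

kerberos := KerberosConfigArgs{
	EnableKerberos:           pulumi.Bool(true),
	KmsKeyUri:                pulumi.String("projects/my-project/locations/global/keyRings/my-ring/cryptoKeys/my-key"),
	RootPrincipalPasswordUri: pulumi.String("gs://my-secrets/root-principal-password.encrypted"),
	// 10 hours matches the documented default TGT lifetime.
	TgtLifetimeHours: pulumi.Int(10),
}

The value would then be supplied to the Kerberos field of a cluster's security configuration defined elsewhere with this package.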

func (KerberosConfigArgs) ElementType

func (KerberosConfigArgs) ElementType() reflect.Type

func (KerberosConfigArgs) ToKerberosConfigOutput

func (i KerberosConfigArgs) ToKerberosConfigOutput() KerberosConfigOutput

func (KerberosConfigArgs) ToKerberosConfigOutputWithContext

func (i KerberosConfigArgs) ToKerberosConfigOutputWithContext(ctx context.Context) KerberosConfigOutput

func (KerberosConfigArgs) ToKerberosConfigPtrOutput

func (i KerberosConfigArgs) ToKerberosConfigPtrOutput() KerberosConfigPtrOutput

func (KerberosConfigArgs) ToKerberosConfigPtrOutputWithContext

func (i KerberosConfigArgs) ToKerberosConfigPtrOutputWithContext(ctx context.Context) KerberosConfigPtrOutput

type KerberosConfigInput

type KerberosConfigInput interface {
	pulumi.Input

	ToKerberosConfigOutput() KerberosConfigOutput
	ToKerberosConfigOutputWithContext(context.Context) KerberosConfigOutput
}

KerberosConfigInput is an input type that accepts KerberosConfigArgs and KerberosConfigOutput values. You can construct a concrete instance of `KerberosConfigInput` via:

KerberosConfigArgs{...}

type KerberosConfigOutput

type KerberosConfigOutput struct{ *pulumi.OutputState }

Specifies Kerberos related configuration.

func (KerberosConfigOutput) CrossRealmTrustAdminServer

func (o KerberosConfigOutput) CrossRealmTrustAdminServer() pulumi.StringPtrOutput

Optional. The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship.

func (KerberosConfigOutput) CrossRealmTrustKdc

func (o KerberosConfigOutput) CrossRealmTrustKdc() pulumi.StringPtrOutput

Optional. The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship.

func (KerberosConfigOutput) CrossRealmTrustRealm

func (o KerberosConfigOutput) CrossRealmTrustRealm() pulumi.StringPtrOutput

Optional. The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust.

func (KerberosConfigOutput) CrossRealmTrustSharedPasswordUri

func (o KerberosConfigOutput) CrossRealmTrustSharedPasswordUri() pulumi.StringPtrOutput

Optional. The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship.

func (KerberosConfigOutput) ElementType

func (KerberosConfigOutput) ElementType() reflect.Type

func (KerberosConfigOutput) EnableKerberos

func (o KerberosConfigOutput) EnableKerberos() pulumi.BoolPtrOutput

Optional. Flag to indicate whether to Kerberize the cluster (default: false). Set this field to true to enable Kerberos on a cluster.

func (KerberosConfigOutput) KdcDbKeyUri

Optional. The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database.

func (KerberosConfigOutput) KeyPasswordUri

func (o KerberosConfigOutput) KeyPasswordUri() pulumi.StringPtrOutput

Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc.

func (KerberosConfigOutput) KeystorePasswordUri

func (o KerberosConfigOutput) KeystorePasswordUri() pulumi.StringPtrOutput

Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, this password is generated by Dataproc.

func (KerberosConfigOutput) KeystoreUri

Optional. The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.

func (KerberosConfigOutput) KmsKeyUri

Optional. The URI of the KMS key used to encrypt various sensitive files.

func (KerberosConfigOutput) Realm

Optional. The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm.

func (KerberosConfigOutput) RootPrincipalPasswordUri

func (o KerberosConfigOutput) RootPrincipalPasswordUri() pulumi.StringPtrOutput

Optional. The Cloud Storage URI of a KMS encrypted file containing the root principal password.

func (KerberosConfigOutput) TgtLifetimeHours

func (o KerberosConfigOutput) TgtLifetimeHours() pulumi.IntPtrOutput

Optional. The lifetime of the ticket-granting ticket, in hours. If not specified, or if the user specifies 0, the default value of 10 is used.

func (KerberosConfigOutput) ToKerberosConfigOutput

func (o KerberosConfigOutput) ToKerberosConfigOutput() KerberosConfigOutput

func (KerberosConfigOutput) ToKerberosConfigOutputWithContext

func (o KerberosConfigOutput) ToKerberosConfigOutputWithContext(ctx context.Context) KerberosConfigOutput

func (KerberosConfigOutput) ToKerberosConfigPtrOutput

func (o KerberosConfigOutput) ToKerberosConfigPtrOutput() KerberosConfigPtrOutput

func (KerberosConfigOutput) ToKerberosConfigPtrOutputWithContext

func (o KerberosConfigOutput) ToKerberosConfigPtrOutputWithContext(ctx context.Context) KerberosConfigPtrOutput

func (KerberosConfigOutput) TruststorePasswordUri

func (o KerberosConfigOutput) TruststorePasswordUri() pulumi.StringPtrOutput

Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc.

func (KerberosConfigOutput) TruststoreUri

func (o KerberosConfigOutput) TruststoreUri() pulumi.StringPtrOutput

Optional. The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.

type KerberosConfigPtrInput

type KerberosConfigPtrInput interface {
	pulumi.Input

	ToKerberosConfigPtrOutput() KerberosConfigPtrOutput
	ToKerberosConfigPtrOutputWithContext(context.Context) KerberosConfigPtrOutput
}

KerberosConfigPtrInput is an input type that accepts KerberosConfigArgs, KerberosConfigPtr and KerberosConfigPtrOutput values. You can construct a concrete instance of `KerberosConfigPtrInput` via:

        KerberosConfigArgs{...}

or:

        nil

type KerberosConfigPtrOutput

type KerberosConfigPtrOutput struct{ *pulumi.OutputState }

func (KerberosConfigPtrOutput) CrossRealmTrustAdminServer

func (o KerberosConfigPtrOutput) CrossRealmTrustAdminServer() pulumi.StringPtrOutput

Optional. The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship.

func (KerberosConfigPtrOutput) CrossRealmTrustKdc

func (o KerberosConfigPtrOutput) CrossRealmTrustKdc() pulumi.StringPtrOutput

Optional. The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship.

func (KerberosConfigPtrOutput) CrossRealmTrustRealm

func (o KerberosConfigPtrOutput) CrossRealmTrustRealm() pulumi.StringPtrOutput

Optional. The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust.

func (KerberosConfigPtrOutput) CrossRealmTrustSharedPasswordUri

func (o KerberosConfigPtrOutput) CrossRealmTrustSharedPasswordUri() pulumi.StringPtrOutput

Optional. The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship.

func (KerberosConfigPtrOutput) Elem

func (KerberosConfigPtrOutput) ElementType

func (KerberosConfigPtrOutput) ElementType() reflect.Type

func (KerberosConfigPtrOutput) EnableKerberos

func (o KerberosConfigPtrOutput) EnableKerberos() pulumi.BoolPtrOutput

Optional. Flag to indicate whether to Kerberize the cluster (default: false). Set this field to true to enable Kerberos on a cluster.

func (KerberosConfigPtrOutput) KdcDbKeyUri

Optional. The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database.

func (KerberosConfigPtrOutput) KeyPasswordUri

func (o KerberosConfigPtrOutput) KeyPasswordUri() pulumi.StringPtrOutput

Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc.

func (KerberosConfigPtrOutput) KeystorePasswordUri

func (o KerberosConfigPtrOutput) KeystorePasswordUri() pulumi.StringPtrOutput

Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, this password is generated by Dataproc.

func (KerberosConfigPtrOutput) KeystoreUri

Optional. The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.

func (KerberosConfigPtrOutput) KmsKeyUri

Optional. The URI of the KMS key used to encrypt various sensitive files.

func (KerberosConfigPtrOutput) Realm

Optional. The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm.

func (KerberosConfigPtrOutput) RootPrincipalPasswordUri

func (o KerberosConfigPtrOutput) RootPrincipalPasswordUri() pulumi.StringPtrOutput

Optional. The Cloud Storage URI of a KMS encrypted file containing the root principal password.

func (KerberosConfigPtrOutput) TgtLifetimeHours

func (o KerberosConfigPtrOutput) TgtLifetimeHours() pulumi.IntPtrOutput

Optional. The lifetime of the ticket-granting ticket, in hours. If not specified, or if the user specifies 0, the default value of 10 is used.

func (KerberosConfigPtrOutput) ToKerberosConfigPtrOutput

func (o KerberosConfigPtrOutput) ToKerberosConfigPtrOutput() KerberosConfigPtrOutput

func (KerberosConfigPtrOutput) ToKerberosConfigPtrOutputWithContext

func (o KerberosConfigPtrOutput) ToKerberosConfigPtrOutputWithContext(ctx context.Context) KerberosConfigPtrOutput

func (KerberosConfigPtrOutput) TruststorePasswordUri

func (o KerberosConfigPtrOutput) TruststorePasswordUri() pulumi.StringPtrOutput

Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc.

func (KerberosConfigPtrOutput) TruststoreUri

Optional. The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.

type KerberosConfigResponse

type KerberosConfigResponse struct {
	// Optional. The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship.
	CrossRealmTrustAdminServer string `pulumi:"crossRealmTrustAdminServer"`
	// Optional. The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship.
	CrossRealmTrustKdc string `pulumi:"crossRealmTrustKdc"`
	// Optional. The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust.
	CrossRealmTrustRealm string `pulumi:"crossRealmTrustRealm"`
	// Optional. The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship.
	CrossRealmTrustSharedPasswordUri string `pulumi:"crossRealmTrustSharedPasswordUri"`
	// Optional. Flag to indicate whether to Kerberize the cluster (default: false). Set this field to true to enable Kerberos on a cluster.
	EnableKerberos bool `pulumi:"enableKerberos"`
	// Optional. The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database.
	KdcDbKeyUri string `pulumi:"kdcDbKeyUri"`
	// Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc.
	KeyPasswordUri string `pulumi:"keyPasswordUri"`
	// Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, this password is generated by Dataproc.
	KeystorePasswordUri string `pulumi:"keystorePasswordUri"`
	// Optional. The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.
	KeystoreUri string `pulumi:"keystoreUri"`
	// Optional. The URI of the KMS key used to encrypt various sensitive files.
	KmsKeyUri string `pulumi:"kmsKeyUri"`
	// Optional. The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm.
	Realm string `pulumi:"realm"`
	// Optional. The Cloud Storage URI of a KMS encrypted file containing the root principal password.
	RootPrincipalPasswordUri string `pulumi:"rootPrincipalPasswordUri"`
	// Optional. The lifetime of the ticket-granting ticket, in hours. If not specified, or if the user specifies 0, the default value of 10 is used.
	TgtLifetimeHours int `pulumi:"tgtLifetimeHours"`
	// Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc.
	TruststorePasswordUri string `pulumi:"truststorePasswordUri"`
	// Optional. The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.
	TruststoreUri string `pulumi:"truststoreUri"`
}

Specifies Kerberos related configuration.

type KerberosConfigResponseOutput

type KerberosConfigResponseOutput struct{ *pulumi.OutputState }

Specifies Kerberos related configuration.

func (KerberosConfigResponseOutput) CrossRealmTrustAdminServer

func (o KerberosConfigResponseOutput) CrossRealmTrustAdminServer() pulumi.StringOutput

Optional. The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship.

func (KerberosConfigResponseOutput) CrossRealmTrustKdc

func (o KerberosConfigResponseOutput) CrossRealmTrustKdc() pulumi.StringOutput

Optional. The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship.

func (KerberosConfigResponseOutput) CrossRealmTrustRealm

func (o KerberosConfigResponseOutput) CrossRealmTrustRealm() pulumi.StringOutput

Optional. The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust.

func (KerberosConfigResponseOutput) CrossRealmTrustSharedPasswordUri

func (o KerberosConfigResponseOutput) CrossRealmTrustSharedPasswordUri() pulumi.StringOutput

Optional. The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship.

func (KerberosConfigResponseOutput) ElementType

func (KerberosConfigResponseOutput) EnableKerberos

func (o KerberosConfigResponseOutput) EnableKerberos() pulumi.BoolOutput

Optional. Flag to indicate whether to Kerberize the cluster (default: false). Set this field to true to enable Kerberos on a cluster.

func (KerberosConfigResponseOutput) KdcDbKeyUri

Optional. The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database.

func (KerberosConfigResponseOutput) KeyPasswordUri

Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc.

func (KerberosConfigResponseOutput) KeystorePasswordUri

func (o KerberosConfigResponseOutput) KeystorePasswordUri() pulumi.StringOutput

Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, this password is generated by Dataproc.

func (KerberosConfigResponseOutput) KeystoreUri

Optional. The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.

func (KerberosConfigResponseOutput) KmsKeyUri

Optional. The URI of the KMS key used to encrypt various sensitive files.

func (KerberosConfigResponseOutput) Realm

Optional. The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm.

func (KerberosConfigResponseOutput) RootPrincipalPasswordUri

func (o KerberosConfigResponseOutput) RootPrincipalPasswordUri() pulumi.StringOutput

Optional. The Cloud Storage URI of a KMS encrypted file containing the root principal password.

func (KerberosConfigResponseOutput) TgtLifetimeHours

func (o KerberosConfigResponseOutput) TgtLifetimeHours() pulumi.IntOutput

Optional. The lifetime of the ticket-granting ticket, in hours. If not specified, or if the user specifies 0, the default value of 10 is used.

func (KerberosConfigResponseOutput) ToKerberosConfigResponseOutput

func (o KerberosConfigResponseOutput) ToKerberosConfigResponseOutput() KerberosConfigResponseOutput

func (KerberosConfigResponseOutput) ToKerberosConfigResponseOutputWithContext

func (o KerberosConfigResponseOutput) ToKerberosConfigResponseOutputWithContext(ctx context.Context) KerberosConfigResponseOutput

func (KerberosConfigResponseOutput) TruststorePasswordUri

func (o KerberosConfigResponseOutput) TruststorePasswordUri() pulumi.StringOutput

Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc.

func (KerberosConfigResponseOutput) TruststoreUri

Optional. The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.

type KubernetesClusterConfig added in v0.18.2

type KubernetesClusterConfig struct {
	// The configuration for running the Dataproc cluster on GKE.
	GkeClusterConfig GkeClusterConfig `pulumi:"gkeClusterConfig"`
	// Optional. A namespace within the Kubernetes cluster to deploy into. If this namespace does not exist, it is created. If it exists, Dataproc verifies that another Dataproc VirtualCluster is not installed into it. If not specified, the name of the Dataproc Cluster is used.
	KubernetesNamespace *string `pulumi:"kubernetesNamespace"`
	// Optional. The software configuration for this Dataproc cluster running on Kubernetes.
	KubernetesSoftwareConfig *KubernetesSoftwareConfig `pulumi:"kubernetesSoftwareConfig"`
}

The configuration for running the Dataproc cluster on Kubernetes.

type KubernetesClusterConfigArgs added in v0.18.2

type KubernetesClusterConfigArgs struct {
	// The configuration for running the Dataproc cluster on GKE.
	GkeClusterConfig GkeClusterConfigInput `pulumi:"gkeClusterConfig"`
	// Optional. A namespace within the Kubernetes cluster to deploy into. If this namespace does not exist, it is created. If it exists, Dataproc verifies that another Dataproc VirtualCluster is not installed into it. If not specified, the name of the Dataproc Cluster is used.
	KubernetesNamespace pulumi.StringPtrInput `pulumi:"kubernetesNamespace"`
	// Optional. The software configuration for this Dataproc cluster running on Kubernetes.
	KubernetesSoftwareConfig KubernetesSoftwareConfigPtrInput `pulumi:"kubernetesSoftwareConfig"`
}

The configuration for running the Dataproc cluster on Kubernetes.
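GkeClusterConfig is the only required field; the namespace and software configuration are optional. A sketch, where GkeClusterConfigArgs is assumed to be this package's args type for the GKE settings and its contents are elided:

k8s := KubernetesClusterConfigArgs{
	// The GKE cluster target and node pool targets would be filled in here.
	GkeClusterConfig:    GkeClusterConfigArgs{},
	KubernetesNamespace: pulumi.String("dataproc-virtual"),
}

The value would then be supplied to the Kubernetes cluster configuration of a virtual cluster defined elsewhere with this package.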

func (KubernetesClusterConfigArgs) ElementType added in v0.18.2

func (KubernetesClusterConfigArgs) ToKubernetesClusterConfigOutput added in v0.18.2

func (i KubernetesClusterConfigArgs) ToKubernetesClusterConfigOutput() KubernetesClusterConfigOutput

func (KubernetesClusterConfigArgs) ToKubernetesClusterConfigOutputWithContext added in v0.18.2

func (i KubernetesClusterConfigArgs) ToKubernetesClusterConfigOutputWithContext(ctx context.Context) KubernetesClusterConfigOutput

func (KubernetesClusterConfigArgs) ToKubernetesClusterConfigPtrOutput added in v0.18.2

func (i KubernetesClusterConfigArgs) ToKubernetesClusterConfigPtrOutput() KubernetesClusterConfigPtrOutput

func (KubernetesClusterConfigArgs) ToKubernetesClusterConfigPtrOutputWithContext added in v0.18.2

func (i KubernetesClusterConfigArgs) ToKubernetesClusterConfigPtrOutputWithContext(ctx context.Context) KubernetesClusterConfigPtrOutput

type KubernetesClusterConfigInput added in v0.18.2

type KubernetesClusterConfigInput interface {
	pulumi.Input

	ToKubernetesClusterConfigOutput() KubernetesClusterConfigOutput
	ToKubernetesClusterConfigOutputWithContext(context.Context) KubernetesClusterConfigOutput
}

KubernetesClusterConfigInput is an input type that accepts KubernetesClusterConfigArgs and KubernetesClusterConfigOutput values. You can construct a concrete instance of `KubernetesClusterConfigInput` via:

KubernetesClusterConfigArgs{...}

type KubernetesClusterConfigOutput added in v0.18.2

type KubernetesClusterConfigOutput struct{ *pulumi.OutputState }

The configuration for running the Dataproc cluster on Kubernetes.

func (KubernetesClusterConfigOutput) ElementType added in v0.18.2

func (KubernetesClusterConfigOutput) GkeClusterConfig added in v0.18.2

The configuration for running the Dataproc cluster on GKE.

func (KubernetesClusterConfigOutput) KubernetesNamespace added in v0.18.2

func (o KubernetesClusterConfigOutput) KubernetesNamespace() pulumi.StringPtrOutput

Optional. A namespace within the Kubernetes cluster to deploy into. If this namespace does not exist, it is created. If it exists, Dataproc verifies that another Dataproc VirtualCluster is not installed into it. If not specified, the name of the Dataproc Cluster is used.

func (KubernetesClusterConfigOutput) KubernetesSoftwareConfig added in v0.18.2

Optional. The software configuration for this Dataproc cluster running on Kubernetes.

func (KubernetesClusterConfigOutput) ToKubernetesClusterConfigOutput added in v0.18.2

func (o KubernetesClusterConfigOutput) ToKubernetesClusterConfigOutput() KubernetesClusterConfigOutput

func (KubernetesClusterConfigOutput) ToKubernetesClusterConfigOutputWithContext added in v0.18.2

func (o KubernetesClusterConfigOutput) ToKubernetesClusterConfigOutputWithContext(ctx context.Context) KubernetesClusterConfigOutput

func (KubernetesClusterConfigOutput) ToKubernetesClusterConfigPtrOutput added in v0.18.2

func (o KubernetesClusterConfigOutput) ToKubernetesClusterConfigPtrOutput() KubernetesClusterConfigPtrOutput

func (KubernetesClusterConfigOutput) ToKubernetesClusterConfigPtrOutputWithContext added in v0.18.2

func (o KubernetesClusterConfigOutput) ToKubernetesClusterConfigPtrOutputWithContext(ctx context.Context) KubernetesClusterConfigPtrOutput

type KubernetesClusterConfigPtrInput added in v0.18.2

type KubernetesClusterConfigPtrInput interface {
	pulumi.Input

	ToKubernetesClusterConfigPtrOutput() KubernetesClusterConfigPtrOutput
	ToKubernetesClusterConfigPtrOutputWithContext(context.Context) KubernetesClusterConfigPtrOutput
}

KubernetesClusterConfigPtrInput is an input type that accepts KubernetesClusterConfigArgs, KubernetesClusterConfigPtr and KubernetesClusterConfigPtrOutput values. You can construct a concrete instance of `KubernetesClusterConfigPtrInput` via:

        KubernetesClusterConfigArgs{...}

or:

        nil

func KubernetesClusterConfigPtr added in v0.18.2

func KubernetesClusterConfigPtr(v *KubernetesClusterConfigArgs) KubernetesClusterConfigPtrInput

type KubernetesClusterConfigPtrOutput added in v0.18.2

type KubernetesClusterConfigPtrOutput struct{ *pulumi.OutputState }

func (KubernetesClusterConfigPtrOutput) Elem added in v0.18.2

func (KubernetesClusterConfigPtrOutput) ElementType added in v0.18.2

func (KubernetesClusterConfigPtrOutput) GkeClusterConfig added in v0.18.2

The configuration for running the Dataproc cluster on GKE.

func (KubernetesClusterConfigPtrOutput) KubernetesNamespace added in v0.18.2

Optional. A namespace within the Kubernetes cluster to deploy into. If this namespace does not exist, it is created. If it exists, Dataproc verifies that another Dataproc VirtualCluster is not installed into it. If not specified, the name of the Dataproc Cluster is used.

func (KubernetesClusterConfigPtrOutput) KubernetesSoftwareConfig added in v0.18.2

Optional. The software configuration for this Dataproc cluster running on Kubernetes.

func (KubernetesClusterConfigPtrOutput) ToKubernetesClusterConfigPtrOutput added in v0.18.2

func (o KubernetesClusterConfigPtrOutput) ToKubernetesClusterConfigPtrOutput() KubernetesClusterConfigPtrOutput

func (KubernetesClusterConfigPtrOutput) ToKubernetesClusterConfigPtrOutputWithContext added in v0.18.2

func (o KubernetesClusterConfigPtrOutput) ToKubernetesClusterConfigPtrOutputWithContext(ctx context.Context) KubernetesClusterConfigPtrOutput

type KubernetesClusterConfigResponse added in v0.18.2

type KubernetesClusterConfigResponse struct {
	// The configuration for running the Dataproc cluster on GKE.
	GkeClusterConfig GkeClusterConfigResponse `pulumi:"gkeClusterConfig"`
	// Optional. A namespace within the Kubernetes cluster to deploy into. If this namespace does not exist, it is created. If it exists, Dataproc verifies that another Dataproc VirtualCluster is not installed into it. If not specified, the name of the Dataproc Cluster is used.
	KubernetesNamespace string `pulumi:"kubernetesNamespace"`
	// Optional. The software configuration for this Dataproc cluster running on Kubernetes.
	KubernetesSoftwareConfig KubernetesSoftwareConfigResponse `pulumi:"kubernetesSoftwareConfig"`
}

The configuration for running the Dataproc cluster on Kubernetes.

type KubernetesClusterConfigResponseOutput added in v0.18.2

type KubernetesClusterConfigResponseOutput struct{ *pulumi.OutputState }

The configuration for running the Dataproc cluster on Kubernetes.

func (KubernetesClusterConfigResponseOutput) ElementType added in v0.18.2

func (KubernetesClusterConfigResponseOutput) GkeClusterConfig added in v0.18.2

The configuration for running the Dataproc cluster on GKE.

func (KubernetesClusterConfigResponseOutput) KubernetesNamespace added in v0.18.2

Optional. A namespace within the Kubernetes cluster to deploy into. If this namespace does not exist, it is created. If it exists, Dataproc verifies that another Dataproc VirtualCluster is not installed into it. If not specified, the name of the Dataproc Cluster is used.

func (KubernetesClusterConfigResponseOutput) KubernetesSoftwareConfig added in v0.18.2

Optional. The software configuration for this Dataproc cluster running on Kubernetes.

func (KubernetesClusterConfigResponseOutput) ToKubernetesClusterConfigResponseOutput added in v0.18.2

func (o KubernetesClusterConfigResponseOutput) ToKubernetesClusterConfigResponseOutput() KubernetesClusterConfigResponseOutput

func (KubernetesClusterConfigResponseOutput) ToKubernetesClusterConfigResponseOutputWithContext added in v0.18.2

func (o KubernetesClusterConfigResponseOutput) ToKubernetesClusterConfigResponseOutputWithContext(ctx context.Context) KubernetesClusterConfigResponseOutput

type KubernetesSoftwareConfig added in v0.18.2

type KubernetesSoftwareConfig struct {
	// The components that should be installed in this Dataproc cluster. The key must be a string from the KubernetesComponent enumeration. The value is the version of the software to be installed. At least one entry must be specified.
	ComponentVersion map[string]string `pulumi:"componentVersion"`
	// The properties to set on daemon config files. Property keys are specified in prefix:property format, for example spark:spark.kubernetes.container.image. The following are supported prefixes and their mappings: spark: spark-defaults.conf. For more information, see Cluster properties (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).
	Properties map[string]string `pulumi:"properties"`
}

The software configuration for this Dataproc cluster running on Kubernetes.

type KubernetesSoftwareConfigArgs added in v0.18.2

type KubernetesSoftwareConfigArgs struct {
	// The components that should be installed in this Dataproc cluster. The key must be a string from the KubernetesComponent enumeration. The value is the version of the software to be installed. At least one entry must be specified.
	ComponentVersion pulumi.StringMapInput `pulumi:"componentVersion"`
	// The properties to set on daemon config files. Property keys are specified in prefix:property format, for example spark:spark.kubernetes.container.image. The following are supported prefixes and their mappings: spark: spark-defaults.conf. For more information, see Cluster properties (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).
	Properties pulumi.StringMapInput `pulumi:"properties"`
}

The software configuration for this Dataproc cluster running on Kubernetes.
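
Example (illustrative): a minimal sketch of a KubernetesSoftwareConfigArgs value that pins one component version and sets a spark-defaults.conf property through the "spark:" prefix. The import path, version string, and container image are assumptions.

package example

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1" // import path assumed
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// softwareConfig installs Spark at a pinned version and overrides the
// container image via the "spark:" prefix, which maps to spark-defaults.conf.
var softwareConfig = dataproc.KubernetesSoftwareConfigArgs{
	ComponentVersion: pulumi.StringMap{
		"SPARK": pulumi.String("3.1-dataproc-7"), // hypothetical version string
	},
	Properties: pulumi.StringMap{
		"spark:spark.kubernetes.container.image": pulumi.String("gcr.io/my-project/spark:latest"), // placeholder image
	},
}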

func (KubernetesSoftwareConfigArgs) ElementType added in v0.18.2

func (KubernetesSoftwareConfigArgs) ToKubernetesSoftwareConfigOutput added in v0.18.2

func (i KubernetesSoftwareConfigArgs) ToKubernetesSoftwareConfigOutput() KubernetesSoftwareConfigOutput

func (KubernetesSoftwareConfigArgs) ToKubernetesSoftwareConfigOutputWithContext added in v0.18.2

func (i KubernetesSoftwareConfigArgs) ToKubernetesSoftwareConfigOutputWithContext(ctx context.Context) KubernetesSoftwareConfigOutput

func (KubernetesSoftwareConfigArgs) ToKubernetesSoftwareConfigPtrOutput added in v0.18.2

func (i KubernetesSoftwareConfigArgs) ToKubernetesSoftwareConfigPtrOutput() KubernetesSoftwareConfigPtrOutput

func (KubernetesSoftwareConfigArgs) ToKubernetesSoftwareConfigPtrOutputWithContext added in v0.18.2

func (i KubernetesSoftwareConfigArgs) ToKubernetesSoftwareConfigPtrOutputWithContext(ctx context.Context) KubernetesSoftwareConfigPtrOutput

type KubernetesSoftwareConfigInput added in v0.18.2

type KubernetesSoftwareConfigInput interface {
	pulumi.Input

	ToKubernetesSoftwareConfigOutput() KubernetesSoftwareConfigOutput
	ToKubernetesSoftwareConfigOutputWithContext(context.Context) KubernetesSoftwareConfigOutput
}

KubernetesSoftwareConfigInput is an input type that accepts KubernetesSoftwareConfigArgs and KubernetesSoftwareConfigOutput values. You can construct a concrete instance of `KubernetesSoftwareConfigInput` via:

KubernetesSoftwareConfigArgs{...}

type KubernetesSoftwareConfigOutput added in v0.18.2

type KubernetesSoftwareConfigOutput struct{ *pulumi.OutputState }

The software configuration for this Dataproc cluster running on Kubernetes.

func (KubernetesSoftwareConfigOutput) ComponentVersion added in v0.18.2

The components that should be installed in this Dataproc cluster. The key must be a string from the KubernetesComponent enumeration. The value is the version of the software to be installed. At least one entry must be specified.

func (KubernetesSoftwareConfigOutput) ElementType added in v0.18.2

func (KubernetesSoftwareConfigOutput) Properties added in v0.18.2

The properties to set on daemon config files. Property keys are specified in prefix:property format, for example spark:spark.kubernetes.container.image. The following are supported prefixes and their mappings: spark: spark-defaults.conf. For more information, see Cluster properties (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).

func (KubernetesSoftwareConfigOutput) ToKubernetesSoftwareConfigOutput added in v0.18.2

func (o KubernetesSoftwareConfigOutput) ToKubernetesSoftwareConfigOutput() KubernetesSoftwareConfigOutput

func (KubernetesSoftwareConfigOutput) ToKubernetesSoftwareConfigOutputWithContext added in v0.18.2

func (o KubernetesSoftwareConfigOutput) ToKubernetesSoftwareConfigOutputWithContext(ctx context.Context) KubernetesSoftwareConfigOutput

func (KubernetesSoftwareConfigOutput) ToKubernetesSoftwareConfigPtrOutput added in v0.18.2

func (o KubernetesSoftwareConfigOutput) ToKubernetesSoftwareConfigPtrOutput() KubernetesSoftwareConfigPtrOutput

func (KubernetesSoftwareConfigOutput) ToKubernetesSoftwareConfigPtrOutputWithContext added in v0.18.2

func (o KubernetesSoftwareConfigOutput) ToKubernetesSoftwareConfigPtrOutputWithContext(ctx context.Context) KubernetesSoftwareConfigPtrOutput

type KubernetesSoftwareConfigPtrInput added in v0.18.2

type KubernetesSoftwareConfigPtrInput interface {
	pulumi.Input

	ToKubernetesSoftwareConfigPtrOutput() KubernetesSoftwareConfigPtrOutput
	ToKubernetesSoftwareConfigPtrOutputWithContext(context.Context) KubernetesSoftwareConfigPtrOutput
}

KubernetesSoftwareConfigPtrInput is an input type that accepts KubernetesSoftwareConfigArgs, KubernetesSoftwareConfigPtr and KubernetesSoftwareConfigPtrOutput values. You can construct a concrete instance of `KubernetesSoftwareConfigPtrInput` via:

        KubernetesSoftwareConfigArgs{...}

or:

        nil

func KubernetesSoftwareConfigPtr added in v0.18.2

func KubernetesSoftwareConfigPtr(v *KubernetesSoftwareConfigArgs) KubernetesSoftwareConfigPtrInput

type KubernetesSoftwareConfigPtrOutput added in v0.18.2

type KubernetesSoftwareConfigPtrOutput struct{ *pulumi.OutputState }

func (KubernetesSoftwareConfigPtrOutput) ComponentVersion added in v0.18.2

The components that should be installed in this Dataproc cluster. The key must be a string from the KubernetesComponent enumeration. The value is the version of the software to be installed. At least one entry must be specified.

func (KubernetesSoftwareConfigPtrOutput) Elem added in v0.18.2

func (KubernetesSoftwareConfigPtrOutput) ElementType added in v0.18.2

func (KubernetesSoftwareConfigPtrOutput) Properties added in v0.18.2

The properties to set on daemon config files. Property keys are specified in prefix:property format, for example spark:spark.kubernetes.container.image. The following are supported prefixes and their mappings: spark: spark-defaults.conf. For more information, see Cluster properties (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).

func (KubernetesSoftwareConfigPtrOutput) ToKubernetesSoftwareConfigPtrOutput added in v0.18.2

func (o KubernetesSoftwareConfigPtrOutput) ToKubernetesSoftwareConfigPtrOutput() KubernetesSoftwareConfigPtrOutput

func (KubernetesSoftwareConfigPtrOutput) ToKubernetesSoftwareConfigPtrOutputWithContext added in v0.18.2

func (o KubernetesSoftwareConfigPtrOutput) ToKubernetesSoftwareConfigPtrOutputWithContext(ctx context.Context) KubernetesSoftwareConfigPtrOutput

type KubernetesSoftwareConfigResponse added in v0.18.2

type KubernetesSoftwareConfigResponse struct {
	// The components that should be installed in this Dataproc cluster. The key must be a string from the KubernetesComponent enumeration. The value is the version of the software to be installed. At least one entry must be specified.
	ComponentVersion map[string]string `pulumi:"componentVersion"`
	// The properties to set on daemon config files. Property keys are specified in prefix:property format, for example spark:spark.kubernetes.container.image. The following are supported prefixes and their mappings: spark: spark-defaults.conf. For more information, see Cluster properties (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).
	Properties map[string]string `pulumi:"properties"`
}

The software configuration for this Dataproc cluster running on Kubernetes.

type KubernetesSoftwareConfigResponseOutput added in v0.18.2

type KubernetesSoftwareConfigResponseOutput struct{ *pulumi.OutputState }

The software configuration for this Dataproc cluster running on Kubernetes.

func (KubernetesSoftwareConfigResponseOutput) ComponentVersion added in v0.18.2

The components that should be installed in this Dataproc cluster. The key must be a string from the KubernetesComponent enumeration. The value is the version of the software to be installed. At least one entry must be specified.

func (KubernetesSoftwareConfigResponseOutput) ElementType added in v0.18.2

func (KubernetesSoftwareConfigResponseOutput) Properties added in v0.18.2

The properties to set on daemon config files. Property keys are specified in prefix:property format, for example spark:spark.kubernetes.container.image. The following are supported prefixes and their mappings: spark: spark-defaults.conf. For more information, see Cluster properties (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).

func (KubernetesSoftwareConfigResponseOutput) ToKubernetesSoftwareConfigResponseOutput added in v0.18.2

func (o KubernetesSoftwareConfigResponseOutput) ToKubernetesSoftwareConfigResponseOutput() KubernetesSoftwareConfigResponseOutput

func (KubernetesSoftwareConfigResponseOutput) ToKubernetesSoftwareConfigResponseOutputWithContext added in v0.18.2

func (o KubernetesSoftwareConfigResponseOutput) ToKubernetesSoftwareConfigResponseOutputWithContext(ctx context.Context) KubernetesSoftwareConfigResponseOutput

type LifecycleConfig

type LifecycleConfig struct {
	// Optional. The time when the cluster will be auto-deleted (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)).
	AutoDeleteTime *string `pulumi:"autoDeleteTime"`
	// Optional. The lifetime duration of the cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).
	AutoDeleteTtl *string `pulumi:"autoDeleteTtl"`
	// Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).
	IdleDeleteTtl *string `pulumi:"idleDeleteTtl"`
}

Specifies the cluster auto-delete schedule configuration.

type LifecycleConfigArgs

type LifecycleConfigArgs struct {
	// Optional. The time when the cluster will be auto-deleted (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)).
	AutoDeleteTime pulumi.StringPtrInput `pulumi:"autoDeleteTime"`
	// Optional. The lifetime duration of the cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).
	AutoDeleteTtl pulumi.StringPtrInput `pulumi:"autoDeleteTtl"`
	// Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).
	IdleDeleteTtl pulumi.StringPtrInput `pulumi:"idleDeleteTtl"`
}

Specifies the cluster auto-delete schedule configuration.
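
Example (illustrative): a minimal sketch of a LifecycleConfigArgs value using the JSON Duration string form for the TTL fields; the durations and import path are assumptions.

package example

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1" // import path assumed
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// lifecycle deletes the cluster after 30 minutes of idleness, and in any case
// after 24 hours, using JSON Duration strings.
var lifecycle = dataproc.LifecycleConfigArgs{
	IdleDeleteTtl: pulumi.String("1800s"),  // minimum 5 minutes, maximum 14 days
	AutoDeleteTtl: pulumi.String("86400s"), // minimum 10 minutes, maximum 14 days
}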

func (LifecycleConfigArgs) ElementType

func (LifecycleConfigArgs) ElementType() reflect.Type

func (LifecycleConfigArgs) ToLifecycleConfigOutput

func (i LifecycleConfigArgs) ToLifecycleConfigOutput() LifecycleConfigOutput

func (LifecycleConfigArgs) ToLifecycleConfigOutputWithContext

func (i LifecycleConfigArgs) ToLifecycleConfigOutputWithContext(ctx context.Context) LifecycleConfigOutput

func (LifecycleConfigArgs) ToLifecycleConfigPtrOutput

func (i LifecycleConfigArgs) ToLifecycleConfigPtrOutput() LifecycleConfigPtrOutput

func (LifecycleConfigArgs) ToLifecycleConfigPtrOutputWithContext

func (i LifecycleConfigArgs) ToLifecycleConfigPtrOutputWithContext(ctx context.Context) LifecycleConfigPtrOutput

type LifecycleConfigInput

type LifecycleConfigInput interface {
	pulumi.Input

	ToLifecycleConfigOutput() LifecycleConfigOutput
	ToLifecycleConfigOutputWithContext(context.Context) LifecycleConfigOutput
}

LifecycleConfigInput is an input type that accepts LifecycleConfigArgs and LifecycleConfigOutput values. You can construct a concrete instance of `LifecycleConfigInput` via:

LifecycleConfigArgs{...}

type LifecycleConfigOutput

type LifecycleConfigOutput struct{ *pulumi.OutputState }

Specifies the cluster auto-delete schedule configuration.

func (LifecycleConfigOutput) AutoDeleteTime

func (o LifecycleConfigOutput) AutoDeleteTime() pulumi.StringPtrOutput

Optional. The time when the cluster will be auto-deleted (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)).

func (LifecycleConfigOutput) AutoDeleteTtl

func (o LifecycleConfigOutput) AutoDeleteTtl() pulumi.StringPtrOutput

Optional. The lifetime duration of the cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).

func (LifecycleConfigOutput) ElementType

func (LifecycleConfigOutput) ElementType() reflect.Type

func (LifecycleConfigOutput) IdleDeleteTtl

func (o LifecycleConfigOutput) IdleDeleteTtl() pulumi.StringPtrOutput

Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).

func (LifecycleConfigOutput) ToLifecycleConfigOutput

func (o LifecycleConfigOutput) ToLifecycleConfigOutput() LifecycleConfigOutput

func (LifecycleConfigOutput) ToLifecycleConfigOutputWithContext

func (o LifecycleConfigOutput) ToLifecycleConfigOutputWithContext(ctx context.Context) LifecycleConfigOutput

func (LifecycleConfigOutput) ToLifecycleConfigPtrOutput

func (o LifecycleConfigOutput) ToLifecycleConfigPtrOutput() LifecycleConfigPtrOutput

func (LifecycleConfigOutput) ToLifecycleConfigPtrOutputWithContext

func (o LifecycleConfigOutput) ToLifecycleConfigPtrOutputWithContext(ctx context.Context) LifecycleConfigPtrOutput

type LifecycleConfigPtrInput

type LifecycleConfigPtrInput interface {
	pulumi.Input

	ToLifecycleConfigPtrOutput() LifecycleConfigPtrOutput
	ToLifecycleConfigPtrOutputWithContext(context.Context) LifecycleConfigPtrOutput
}

LifecycleConfigPtrInput is an input type that accepts LifecycleConfigArgs, LifecycleConfigPtr and LifecycleConfigPtrOutput values. You can construct a concrete instance of `LifecycleConfigPtrInput` via:

        LifecycleConfigArgs{...}

or:

        nil

type LifecycleConfigPtrOutput

type LifecycleConfigPtrOutput struct{ *pulumi.OutputState }

func (LifecycleConfigPtrOutput) AutoDeleteTime

func (o LifecycleConfigPtrOutput) AutoDeleteTime() pulumi.StringPtrOutput

Optional. The time when the cluster will be auto-deleted (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)).

func (LifecycleConfigPtrOutput) AutoDeleteTtl

Optional. The lifetime duration of the cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).

func (LifecycleConfigPtrOutput) Elem

func (LifecycleConfigPtrOutput) ElementType

func (LifecycleConfigPtrOutput) ElementType() reflect.Type

func (LifecycleConfigPtrOutput) IdleDeleteTtl

Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).

func (LifecycleConfigPtrOutput) ToLifecycleConfigPtrOutput

func (o LifecycleConfigPtrOutput) ToLifecycleConfigPtrOutput() LifecycleConfigPtrOutput

func (LifecycleConfigPtrOutput) ToLifecycleConfigPtrOutputWithContext

func (o LifecycleConfigPtrOutput) ToLifecycleConfigPtrOutputWithContext(ctx context.Context) LifecycleConfigPtrOutput

type LifecycleConfigResponse

type LifecycleConfigResponse struct {
	// Optional. The time when the cluster will be auto-deleted (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)).
	AutoDeleteTime string `pulumi:"autoDeleteTime"`
	// Optional. The lifetime duration of the cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).
	AutoDeleteTtl string `pulumi:"autoDeleteTtl"`
	// Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).
	IdleDeleteTtl string `pulumi:"idleDeleteTtl"`
	// The time when the cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)).
	IdleStartTime string `pulumi:"idleStartTime"`
}

Specifies the cluster auto-delete schedule configuration.

type LifecycleConfigResponseOutput

type LifecycleConfigResponseOutput struct{ *pulumi.OutputState }

Specifies the cluster auto-delete schedule configuration.

func (LifecycleConfigResponseOutput) AutoDeleteTime

Optional. The time when the cluster will be auto-deleted (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)).

func (LifecycleConfigResponseOutput) AutoDeleteTtl

Optional. The lifetime duration of the cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).

func (LifecycleConfigResponseOutput) ElementType

func (LifecycleConfigResponseOutput) IdleDeleteTtl

Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).

func (LifecycleConfigResponseOutput) IdleStartTime

The time when the cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)).

func (LifecycleConfigResponseOutput) ToLifecycleConfigResponseOutput

func (o LifecycleConfigResponseOutput) ToLifecycleConfigResponseOutput() LifecycleConfigResponseOutput

func (LifecycleConfigResponseOutput) ToLifecycleConfigResponseOutputWithContext

func (o LifecycleConfigResponseOutput) ToLifecycleConfigResponseOutputWithContext(ctx context.Context) LifecycleConfigResponseOutput

type LoggingConfig

type LoggingConfig struct {
	// The per-package log levels for the driver. This can include the "root" package name to configure rootLogger. Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG'
	DriverLogLevels map[string]string `pulumi:"driverLogLevels"`
}

The runtime logging config of the job.

type LoggingConfigArgs

type LoggingConfigArgs struct {
	// The per-package log levels for the driver. This can include the "root" package name to configure rootLogger. Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG'
	DriverLogLevels pulumi.StringMapInput `pulumi:"driverLogLevels"`
}

The runtime logging config of the job.
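
Example (illustrative): a minimal sketch of a LoggingConfigArgs value with per-package driver log levels; the import path is an assumption.

package example

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1" // import path assumed
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// logging raises org.apache to DEBUG and keeps the root logger at INFO.
var logging = dataproc.LoggingConfigArgs{
	DriverLogLevels: pulumi.StringMap{
		"root":       pulumi.String("INFO"),
		"org.apache": pulumi.String("DEBUG"),
	},
}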

func (LoggingConfigArgs) ElementType

func (LoggingConfigArgs) ElementType() reflect.Type

func (LoggingConfigArgs) ToLoggingConfigOutput

func (i LoggingConfigArgs) ToLoggingConfigOutput() LoggingConfigOutput

func (LoggingConfigArgs) ToLoggingConfigOutputWithContext

func (i LoggingConfigArgs) ToLoggingConfigOutputWithContext(ctx context.Context) LoggingConfigOutput

func (LoggingConfigArgs) ToLoggingConfigPtrOutput

func (i LoggingConfigArgs) ToLoggingConfigPtrOutput() LoggingConfigPtrOutput

func (LoggingConfigArgs) ToLoggingConfigPtrOutputWithContext

func (i LoggingConfigArgs) ToLoggingConfigPtrOutputWithContext(ctx context.Context) LoggingConfigPtrOutput

type LoggingConfigInput

type LoggingConfigInput interface {
	pulumi.Input

	ToLoggingConfigOutput() LoggingConfigOutput
	ToLoggingConfigOutputWithContext(context.Context) LoggingConfigOutput
}

LoggingConfigInput is an input type that accepts LoggingConfigArgs and LoggingConfigOutput values. You can construct a concrete instance of `LoggingConfigInput` via:

LoggingConfigArgs{...}

type LoggingConfigOutput

type LoggingConfigOutput struct{ *pulumi.OutputState }

The runtime logging config of the job.

func (LoggingConfigOutput) DriverLogLevels

func (o LoggingConfigOutput) DriverLogLevels() pulumi.StringMapOutput

The per-package log levels for the driver. This can include the "root" package name to configure rootLogger. Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG'

func (LoggingConfigOutput) ElementType

func (LoggingConfigOutput) ElementType() reflect.Type

func (LoggingConfigOutput) ToLoggingConfigOutput

func (o LoggingConfigOutput) ToLoggingConfigOutput() LoggingConfigOutput

func (LoggingConfigOutput) ToLoggingConfigOutputWithContext

func (o LoggingConfigOutput) ToLoggingConfigOutputWithContext(ctx context.Context) LoggingConfigOutput

func (LoggingConfigOutput) ToLoggingConfigPtrOutput

func (o LoggingConfigOutput) ToLoggingConfigPtrOutput() LoggingConfigPtrOutput

func (LoggingConfigOutput) ToLoggingConfigPtrOutputWithContext

func (o LoggingConfigOutput) ToLoggingConfigPtrOutputWithContext(ctx context.Context) LoggingConfigPtrOutput

type LoggingConfigPtrInput

type LoggingConfigPtrInput interface {
	pulumi.Input

	ToLoggingConfigPtrOutput() LoggingConfigPtrOutput
	ToLoggingConfigPtrOutputWithContext(context.Context) LoggingConfigPtrOutput
}

LoggingConfigPtrInput is an input type that accepts LoggingConfigArgs, LoggingConfigPtr and LoggingConfigPtrOutput values. You can construct a concrete instance of `LoggingConfigPtrInput` via:

        LoggingConfigArgs{...}

or:

        nil

type LoggingConfigPtrOutput

type LoggingConfigPtrOutput struct{ *pulumi.OutputState }

func (LoggingConfigPtrOutput) DriverLogLevels

func (o LoggingConfigPtrOutput) DriverLogLevels() pulumi.StringMapOutput

The per-package log levels for the driver. This can include the "root" package name to configure rootLogger. Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG'

func (LoggingConfigPtrOutput) Elem

func (LoggingConfigPtrOutput) ElementType

func (LoggingConfigPtrOutput) ElementType() reflect.Type

func (LoggingConfigPtrOutput) ToLoggingConfigPtrOutput

func (o LoggingConfigPtrOutput) ToLoggingConfigPtrOutput() LoggingConfigPtrOutput

func (LoggingConfigPtrOutput) ToLoggingConfigPtrOutputWithContext

func (o LoggingConfigPtrOutput) ToLoggingConfigPtrOutputWithContext(ctx context.Context) LoggingConfigPtrOutput

type LoggingConfigResponse

type LoggingConfigResponse struct {
	// The per-package log levels for the driver. This can include the "root" package name to configure rootLogger. Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG'
	DriverLogLevels map[string]string `pulumi:"driverLogLevels"`
}

The runtime logging config of the job.

type LoggingConfigResponseOutput

type LoggingConfigResponseOutput struct{ *pulumi.OutputState }

The runtime logging config of the job.

func (LoggingConfigResponseOutput) DriverLogLevels

The per-package log levels for the driver. This can include the "root" package name to configure rootLogger. Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG'

func (LoggingConfigResponseOutput) ElementType

func (LoggingConfigResponseOutput) ToLoggingConfigResponseOutput

func (o LoggingConfigResponseOutput) ToLoggingConfigResponseOutput() LoggingConfigResponseOutput

func (LoggingConfigResponseOutput) ToLoggingConfigResponseOutputWithContext

func (o LoggingConfigResponseOutput) ToLoggingConfigResponseOutputWithContext(ctx context.Context) LoggingConfigResponseOutput

type LookupAutoscalingPolicyArgs added in v0.4.0

type LookupAutoscalingPolicyArgs struct {
	AutoscalingPolicyId string  `pulumi:"autoscalingPolicyId"`
	Location            string  `pulumi:"location"`
	Project             *string `pulumi:"project"`
}

type LookupAutoscalingPolicyIamPolicyArgs added in v0.4.0

type LookupAutoscalingPolicyIamPolicyArgs struct {
	AutoscalingPolicyId string  `pulumi:"autoscalingPolicyId"`
	Location            string  `pulumi:"location"`
	Project             *string `pulumi:"project"`
}

type LookupAutoscalingPolicyIamPolicyOutputArgs added in v0.8.0

type LookupAutoscalingPolicyIamPolicyOutputArgs struct {
	AutoscalingPolicyId pulumi.StringInput    `pulumi:"autoscalingPolicyId"`
	Location            pulumi.StringInput    `pulumi:"location"`
	Project             pulumi.StringPtrInput `pulumi:"project"`
}

func (LookupAutoscalingPolicyIamPolicyOutputArgs) ElementType added in v0.8.0

type LookupAutoscalingPolicyIamPolicyResult added in v0.4.0

type LookupAutoscalingPolicyIamPolicyResult struct {
	// Associates a list of members, or principals, with a role. Optionally, may specify a condition that determines how and when the bindings are applied. Each of the bindings must contain at least one principal.The bindings in a Policy can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the bindings grant 50 different roles to user:alice@example.com, and not to any other principal, then you can add another 1,450 principals to the bindings in the Policy.
	Bindings []BindingResponse `pulumi:"bindings"`
	// etag is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the etag in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An etag is returned in the response to getIamPolicy, and systems are expected to put that etag in the request to setIamPolicy to ensure that their change will be applied to the same version of the policy. Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.
	Etag string `pulumi:"etag"`
	// Specifies the format of the policy. Valid values are 0, 1, and 3. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version 3. This requirement applies to the following operations: getting a policy that includes a conditional role binding; adding a conditional role binding to a policy; changing a conditional role binding in a policy; and removing any role binding, with or without a condition, from a policy that includes conditions. Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).
	Version int `pulumi:"version"`
}

func LookupAutoscalingPolicyIamPolicy added in v0.4.0

Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.

type LookupAutoscalingPolicyIamPolicyResultOutput added in v0.8.0

type LookupAutoscalingPolicyIamPolicyResultOutput struct{ *pulumi.OutputState }

func (LookupAutoscalingPolicyIamPolicyResultOutput) Bindings added in v0.8.0

Associates a list of members, or principals, with a role. Optionally, may specify a condition that determines how and when the bindings are applied. Each of the bindings must contain at least one principal.The bindings in a Policy can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the bindings grant 50 different roles to user:alice@example.com, and not to any other principal, then you can add another 1,450 principals to the bindings in the Policy.

func (LookupAutoscalingPolicyIamPolicyResultOutput) ElementType added in v0.8.0

func (LookupAutoscalingPolicyIamPolicyResultOutput) Etag added in v0.8.0

etag is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the etag in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An etag is returned in the response to getIamPolicy, and systems are expected to put that etag in the request to setIamPolicy to ensure that their change will be applied to the same version of the policy. Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.

func (LookupAutoscalingPolicyIamPolicyResultOutput) ToLookupAutoscalingPolicyIamPolicyResultOutput added in v0.8.0

func (o LookupAutoscalingPolicyIamPolicyResultOutput) ToLookupAutoscalingPolicyIamPolicyResultOutput() LookupAutoscalingPolicyIamPolicyResultOutput

func (LookupAutoscalingPolicyIamPolicyResultOutput) ToLookupAutoscalingPolicyIamPolicyResultOutputWithContext added in v0.8.0

func (o LookupAutoscalingPolicyIamPolicyResultOutput) ToLookupAutoscalingPolicyIamPolicyResultOutputWithContext(ctx context.Context) LookupAutoscalingPolicyIamPolicyResultOutput

func (LookupAutoscalingPolicyIamPolicyResultOutput) Version added in v0.8.0

Specifies the format of the policy. Valid values are 0, 1, and 3. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version 3. This requirement applies to the following operations: getting a policy that includes a conditional role binding; adding a conditional role binding to a policy; changing a conditional role binding in a policy; and removing any role binding, with or without a condition, from a policy that includes conditions. Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).

type LookupAutoscalingPolicyOutputArgs added in v0.8.0

type LookupAutoscalingPolicyOutputArgs struct {
	AutoscalingPolicyId pulumi.StringInput    `pulumi:"autoscalingPolicyId"`
	Location            pulumi.StringInput    `pulumi:"location"`
	Project             pulumi.StringPtrInput `pulumi:"project"`
}

func (LookupAutoscalingPolicyOutputArgs) ElementType added in v0.8.0

type LookupAutoscalingPolicyResult added in v0.4.0

type LookupAutoscalingPolicyResult struct {
	BasicAlgorithm BasicAutoscalingAlgorithmResponse `pulumi:"basicAlgorithm"`
	// Optional. The labels to associate with this autoscaling policy. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with an autoscaling policy.
	Labels map[string]string `pulumi:"labels"`
	// The "resource name" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For projects.locations.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}
	Name string `pulumi:"name"`
	// Optional. Describes how the autoscaler will operate for secondary workers.
	SecondaryWorkerConfig InstanceGroupAutoscalingPolicyConfigResponse `pulumi:"secondaryWorkerConfig"`
	// Describes how the autoscaler will operate for primary workers.
	WorkerConfig InstanceGroupAutoscalingPolicyConfigResponse `pulumi:"workerConfig"`
}

func LookupAutoscalingPolicy added in v0.4.0

func LookupAutoscalingPolicy(ctx *pulumi.Context, args *LookupAutoscalingPolicyArgs, opts ...pulumi.InvokeOption) (*LookupAutoscalingPolicyResult, error)

Retrieves an autoscaling policy.
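
Example (illustrative): a minimal sketch of reading an existing policy with LookupAutoscalingPolicy inside a Pulumi program. The import path, policy ID, and location are assumptions; Project is omitted, so the provider default applies.

package main

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1" // import path assumed
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		policy, err := dataproc.LookupAutoscalingPolicy(ctx, &dataproc.LookupAutoscalingPolicyArgs{
			AutoscalingPolicyId: "my-policy",   // placeholder
			Location:            "us-central1", // placeholder
		})
		if err != nil {
			return err
		}
		// Export the policy's full resource name.
		ctx.Export("policyName", pulumi.String(policy.Name))
		return nil
	})
}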

type LookupAutoscalingPolicyResultOutput added in v0.8.0

type LookupAutoscalingPolicyResultOutput struct{ *pulumi.OutputState }

func (LookupAutoscalingPolicyResultOutput) BasicAlgorithm added in v0.8.0

func (LookupAutoscalingPolicyResultOutput) ElementType added in v0.8.0

func (LookupAutoscalingPolicyResultOutput) Labels added in v0.9.0

Optional. The labels to associate with this autoscaling policy. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with an autoscaling policy.

func (LookupAutoscalingPolicyResultOutput) Name added in v0.8.0

The "resource name" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For projects.locations.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}

func (LookupAutoscalingPolicyResultOutput) SecondaryWorkerConfig added in v0.8.0

Optional. Describes how the autoscaler will operate for secondary workers.

func (LookupAutoscalingPolicyResultOutput) ToLookupAutoscalingPolicyResultOutput added in v0.8.0

func (o LookupAutoscalingPolicyResultOutput) ToLookupAutoscalingPolicyResultOutput() LookupAutoscalingPolicyResultOutput

func (LookupAutoscalingPolicyResultOutput) ToLookupAutoscalingPolicyResultOutputWithContext added in v0.8.0

func (o LookupAutoscalingPolicyResultOutput) ToLookupAutoscalingPolicyResultOutputWithContext(ctx context.Context) LookupAutoscalingPolicyResultOutput

func (LookupAutoscalingPolicyResultOutput) WorkerConfig added in v0.8.0

Describes how the autoscaler will operate for primary workers.

type LookupBatchArgs added in v0.12.0

type LookupBatchArgs struct {
	BatchId  string  `pulumi:"batchId"`
	Location string  `pulumi:"location"`
	Project  *string `pulumi:"project"`
}

type LookupBatchOutputArgs added in v0.12.0

type LookupBatchOutputArgs struct {
	BatchId  pulumi.StringInput    `pulumi:"batchId"`
	Location pulumi.StringInput    `pulumi:"location"`
	Project  pulumi.StringPtrInput `pulumi:"project"`
}

func (LookupBatchOutputArgs) ElementType added in v0.12.0

func (LookupBatchOutputArgs) ElementType() reflect.Type

type LookupBatchResult added in v0.12.0

type LookupBatchResult struct {
	// The time when the batch was created.
	CreateTime string `pulumi:"createTime"`
	// The email address of the user who created the batch.
	Creator string `pulumi:"creator"`
	// Optional. Environment configuration for the batch execution.
	EnvironmentConfig EnvironmentConfigResponse `pulumi:"environmentConfig"`
	// Optional. The labels to associate with this batch. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a batch.
	Labels map[string]string `pulumi:"labels"`
	// The resource name of the batch.
	Name string `pulumi:"name"`
	// The resource name of the operation associated with this batch.
	Operation string `pulumi:"operation"`
	// Optional. PySpark batch config.
	PysparkBatch PySparkBatchResponse `pulumi:"pysparkBatch"`
	// Optional. Runtime configuration for the batch execution.
	RuntimeConfig RuntimeConfigResponse `pulumi:"runtimeConfig"`
	// Runtime information about batch execution.
	RuntimeInfo RuntimeInfoResponse `pulumi:"runtimeInfo"`
	// Optional. Spark batch config.
	SparkBatch SparkBatchResponse `pulumi:"sparkBatch"`
	// Optional. SparkR batch config.
	SparkRBatch SparkRBatchResponse `pulumi:"sparkRBatch"`
	// Optional. SparkSql batch config.
	SparkSqlBatch SparkSqlBatchResponse `pulumi:"sparkSqlBatch"`
	// The state of the batch.
	State string `pulumi:"state"`
	// Historical state information for the batch.
	StateHistory []StateHistoryResponse `pulumi:"stateHistory"`
	// Batch state details, such as a failure description if the state is FAILED.
	StateMessage string `pulumi:"stateMessage"`
	// The time when the batch entered a current state.
	StateTime string `pulumi:"stateTime"`
	// A batch UUID (Unique Universal Identifier). The service generates this value when it creates the batch.
	Uuid string `pulumi:"uuid"`
}

func LookupBatch added in v0.12.0

func LookupBatch(ctx *pulumi.Context, args *LookupBatchArgs, opts ...pulumi.InvokeOption) (*LookupBatchResult, error)

Gets the batch workload resource representation.
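
Example (illustrative): a minimal sketch of reading an existing batch with LookupBatch; the import path, batch ID, and location are assumptions.

package main

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1" // import path assumed
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		batch, err := dataproc.LookupBatch(ctx, &dataproc.LookupBatchArgs{
			BatchId:  "my-batch",    // placeholder
			Location: "us-central1", // placeholder
		})
		if err != nil {
			return err
		}
		// Export the batch's current state and creator for inspection.
		ctx.Export("batchState", pulumi.String(batch.State))
		ctx.Export("batchCreator", pulumi.String(batch.Creator))
		return nil
	})
}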

type LookupBatchResultOutput added in v0.12.0

type LookupBatchResultOutput struct{ *pulumi.OutputState }

func LookupBatchOutput added in v0.12.0

func LookupBatchOutput(ctx *pulumi.Context, args LookupBatchOutputArgs, opts ...pulumi.InvokeOption) LookupBatchResultOutput

func (LookupBatchResultOutput) CreateTime added in v0.12.0

The time when the batch was created.

func (LookupBatchResultOutput) Creator added in v0.12.0

The email address of the user who created the batch.

func (LookupBatchResultOutput) ElementType added in v0.12.0

func (LookupBatchResultOutput) ElementType() reflect.Type

func (LookupBatchResultOutput) EnvironmentConfig added in v0.12.0

Optional. Environment configuration for the batch execution.

func (LookupBatchResultOutput) Labels added in v0.12.0

Optional. The labels to associate with this batch. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a batch.

func (LookupBatchResultOutput) Name added in v0.12.0

The resource name of the batch.

func (LookupBatchResultOutput) Operation added in v0.12.0

The resource name of the operation associated with this batch.

func (LookupBatchResultOutput) PysparkBatch added in v0.12.0

Optional. PySpark batch config.

func (LookupBatchResultOutput) RuntimeConfig added in v0.12.0

Optional. Runtime configuration for the batch execution.

func (LookupBatchResultOutput) RuntimeInfo added in v0.12.0

Runtime information about batch execution.

func (LookupBatchResultOutput) SparkBatch added in v0.12.0

Optional. Spark batch config.

func (LookupBatchResultOutput) SparkRBatch added in v0.12.0

Optional. SparkR batch config.

func (LookupBatchResultOutput) SparkSqlBatch added in v0.12.0

Optional. SparkSql batch config.

func (LookupBatchResultOutput) State added in v0.12.0

The state of the batch.

func (LookupBatchResultOutput) StateHistory added in v0.12.0

Historical state information for the batch.

func (LookupBatchResultOutput) StateMessage added in v0.12.0

func (o LookupBatchResultOutput) StateMessage() pulumi.StringOutput

Batch state details, such as a failure description if the state is FAILED.

func (LookupBatchResultOutput) StateTime added in v0.12.0

The time when the batch entered a current state.

func (LookupBatchResultOutput) ToLookupBatchResultOutput added in v0.12.0

func (o LookupBatchResultOutput) ToLookupBatchResultOutput() LookupBatchResultOutput

func (LookupBatchResultOutput) ToLookupBatchResultOutputWithContext added in v0.12.0

func (o LookupBatchResultOutput) ToLookupBatchResultOutputWithContext(ctx context.Context) LookupBatchResultOutput

func (LookupBatchResultOutput) Uuid added in v0.12.0

A batch UUID (Unique Universal Identifier). The service generates this value when it creates the batch.

type LookupClusterArgs added in v0.4.0

type LookupClusterArgs struct {
	ClusterName string  `pulumi:"clusterName"`
	Project     *string `pulumi:"project"`
	Region      string  `pulumi:"region"`
}

type LookupClusterOutputArgs added in v0.8.0

type LookupClusterOutputArgs struct {
	ClusterName pulumi.StringInput    `pulumi:"clusterName"`
	Project     pulumi.StringPtrInput `pulumi:"project"`
	Region      pulumi.StringInput    `pulumi:"region"`
}

func (LookupClusterOutputArgs) ElementType added in v0.8.0

func (LookupClusterOutputArgs) ElementType() reflect.Type

type LookupClusterResult added in v0.4.0

type LookupClusterResult struct {
	// The cluster name, which must be unique within a project. The name must start with a lowercase letter, and can contain up to 51 lowercase letters, numbers, and hyphens. It cannot end with a hyphen. The name of a deleted cluster can be reused.
	ClusterName string `pulumi:"clusterName"`
	// A cluster UUID (Unique Universal Identifier). Dataproc generates this value when it creates the cluster.
	ClusterUuid string `pulumi:"clusterUuid"`
	// Optional. The cluster config for a cluster of Compute Engine Instances. Note that Dataproc may set default values, and values may change when clusters are updated. Exactly one of ClusterConfig or VirtualClusterConfig must be specified.
	Config ClusterConfigResponse `pulumi:"config"`
	// Optional. The labels to associate with this cluster. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a cluster.
	Labels map[string]string `pulumi:"labels"`
	// Contains cluster daemon metrics such as HDFS and YARN stats. Beta Feature: This report is available for testing purposes only. It may be changed before final release.
	Metrics ClusterMetricsResponse `pulumi:"metrics"`
	// The Google Cloud Platform project ID that the cluster belongs to.
	Project string `pulumi:"project"`
	// Cluster status.
	Status ClusterStatusResponse `pulumi:"status"`
	// The previous cluster status.
	StatusHistory []ClusterStatusResponse `pulumi:"statusHistory"`
	// Optional. The virtual cluster config is used when creating a Dataproc cluster that does not directly control the underlying compute resources, for example, when creating a Dataproc-on-GKE cluster (https://cloud.google.com/dataproc/docs/guides/dpgke/dataproc-gke-overview). Dataproc may set default values, and values may change when clusters are updated. Exactly one of config or virtual_cluster_config must be specified.
	VirtualClusterConfig VirtualClusterConfigResponse `pulumi:"virtualClusterConfig"`
}

func LookupCluster added in v0.4.0

func LookupCluster(ctx *pulumi.Context, args *LookupClusterArgs, opts ...pulumi.InvokeOption) (*LookupClusterResult, error)

Gets the resource representation for a cluster in a project.
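
Example (illustrative): a minimal sketch of fetching a cluster with LookupCluster; the import path, cluster name, and region are assumptions.

package main

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1" // import path assumed
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		cluster, err := dataproc.LookupCluster(ctx, &dataproc.LookupClusterArgs{
			ClusterName: "my-cluster",  // placeholder
			Region:      "us-central1", // placeholder
		})
		if err != nil {
			return err
		}
		// Export the UUID that Dataproc generated for the cluster.
		ctx.Export("clusterUuid", pulumi.String(cluster.ClusterUuid))
		return nil
	})
}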

type LookupClusterResultOutput added in v0.8.0

type LookupClusterResultOutput struct{ *pulumi.OutputState }

func LookupClusterOutput added in v0.8.0

func LookupClusterOutput(ctx *pulumi.Context, args LookupClusterOutputArgs, opts ...pulumi.InvokeOption) LookupClusterResultOutput
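
Example (illustrative): the output-form variant of the same lookup, a sketch under the same assumed import path; here the arguments are Pulumi inputs, so they can come from other resources' outputs, and the result is itself an output.

package main

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1" // import path assumed
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		out := dataproc.LookupClusterOutput(ctx, dataproc.LookupClusterOutputArgs{
			ClusterName: pulumi.String("my-cluster"),  // placeholder; could be another resource's output
			Region:      pulumi.String("us-central1"), // placeholder
		})
		// Property accessors on the result output return further outputs.
		ctx.Export("clusterUuid", out.ClusterUuid())
		return nil
	})
}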

func (LookupClusterResultOutput) ClusterName added in v0.8.0

The cluster name, which must be unique within a project. The name must start with a lowercase letter, and can contain up to 51 lowercase letters, numbers, and hyphens. It cannot end with a hyphen. The name of a deleted cluster can be reused.

func (LookupClusterResultOutput) ClusterUuid added in v0.8.0

A cluster UUID (Unique Universal Identifier). Dataproc generates this value when it creates the cluster.

func (LookupClusterResultOutput) Config added in v0.8.0

Optional. The cluster config for a cluster of Compute Engine Instances. Note that Dataproc may set default values, and values may change when clusters are updated. Exactly one of ClusterConfig or VirtualClusterConfig must be specified.

func (LookupClusterResultOutput) ElementType added in v0.8.0

func (LookupClusterResultOutput) ElementType() reflect.Type

func (LookupClusterResultOutput) Labels added in v0.8.0

Optional. The labels to associate with this cluster. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a cluster.

func (LookupClusterResultOutput) Metrics added in v0.8.0

Contains cluster daemon metrics such as HDFS and YARN stats. Beta Feature: This report is available for testing purposes only. It may be changed before final release.

func (LookupClusterResultOutput) Project added in v0.8.0

The Google Cloud Platform project ID that the cluster belongs to.

func (LookupClusterResultOutput) Status added in v0.8.0

Cluster status.

func (LookupClusterResultOutput) StatusHistory added in v0.8.0

The previous cluster status.

func (LookupClusterResultOutput) ToLookupClusterResultOutput added in v0.8.0

func (o LookupClusterResultOutput) ToLookupClusterResultOutput() LookupClusterResultOutput

func (LookupClusterResultOutput) ToLookupClusterResultOutputWithContext added in v0.8.0

func (o LookupClusterResultOutput) ToLookupClusterResultOutputWithContext(ctx context.Context) LookupClusterResultOutput

func (LookupClusterResultOutput) VirtualClusterConfig added in v0.18.2

Optional. The virtual cluster config is used when creating a Dataproc cluster that does not directly control the underlying compute resources, for example, when creating a Dataproc-on-GKE cluster (https://cloud.google.com/dataproc/docs/guides/dpgke/dataproc-gke-overview). Dataproc may set default values, and values may change when clusters are updated. Exactly one of config or virtual_cluster_config must be specified.

type LookupJobArgs added in v0.4.0

type LookupJobArgs struct {
	JobId   string  `pulumi:"jobId"`
	Project *string `pulumi:"project"`
	Region  string  `pulumi:"region"`
}

type LookupJobOutputArgs added in v0.8.0

type LookupJobOutputArgs struct {
	JobId   pulumi.StringInput    `pulumi:"jobId"`
	Project pulumi.StringPtrInput `pulumi:"project"`
	Region  pulumi.StringInput    `pulumi:"region"`
}

func (LookupJobOutputArgs) ElementType added in v0.8.0

func (LookupJobOutputArgs) ElementType() reflect.Type

type LookupJobResult added in v0.4.0

type LookupJobResult struct {
	// Indicates whether the job is completed. If the value is false, the job is still in progress. If true, the job is completed, and the status.state field will indicate if it was successful, failed, or cancelled.
	Done bool `pulumi:"done"`
	// If present, the location of miscellaneous control files which can be used as part of job setup and handling. If not present, control files might be placed in the same location as driver_output_uri.
	DriverControlFilesUri string `pulumi:"driverControlFilesUri"`
	// A URI pointing to the location of the stdout of the job's driver program.
	DriverOutputResourceUri string `pulumi:"driverOutputResourceUri"`
	// Optional. Driver scheduling configuration.
	DriverSchedulingConfig DriverSchedulingConfigResponse `pulumi:"driverSchedulingConfig"`
	// Optional. Job is a Flink job.
	FlinkJob FlinkJobResponse `pulumi:"flinkJob"`
	// Optional. Job is a Hadoop job.
	HadoopJob HadoopJobResponse `pulumi:"hadoopJob"`
	// Optional. Job is a Hive job.
	HiveJob HiveJobResponse `pulumi:"hiveJob"`
	// A UUID that uniquely identifies a job within the project over time. This is in contrast to a user-settable reference.job_id that might be reused over time.
	JobUuid string `pulumi:"jobUuid"`
	// Optional. The labels to associate with this job. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a job.
	Labels map[string]string `pulumi:"labels"`
	// Optional. Job is a Pig job.
	PigJob PigJobResponse `pulumi:"pigJob"`
	// Job information, including how, when, and where to run the job.
	Placement JobPlacementResponse `pulumi:"placement"`
	// Optional. Job is a Presto job.
	PrestoJob PrestoJobResponse `pulumi:"prestoJob"`
	// Optional. Job is a PySpark job.
	PysparkJob PySparkJobResponse `pulumi:"pysparkJob"`
	// Optional. The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a job_id.
	Reference JobReferenceResponse `pulumi:"reference"`
	// Optional. Job scheduling configuration.
	Scheduling JobSchedulingResponse `pulumi:"scheduling"`
	// Optional. Job is a Spark job.
	SparkJob SparkJobResponse `pulumi:"sparkJob"`
	// Optional. Job is a SparkR job.
	SparkRJob SparkRJobResponse `pulumi:"sparkRJob"`
	// Optional. Job is a SparkSql job.
	SparkSqlJob SparkSqlJobResponse `pulumi:"sparkSqlJob"`
	// The job status. Additional application-specific status information might be contained in the type_job and yarn_applications fields.
	Status JobStatusResponse `pulumi:"status"`
	// The previous job status.
	StatusHistory []JobStatusResponse `pulumi:"statusHistory"`
	// Optional. Job is a Trino job.
	TrinoJob TrinoJobResponse `pulumi:"trinoJob"`
	// The collection of YARN applications spun up by this job. Beta Feature: This report is available for testing purposes only. It might be changed before final release.
	YarnApplications []YarnApplicationResponse `pulumi:"yarnApplications"`
}

func LookupJob added in v0.4.0

func LookupJob(ctx *pulumi.Context, args *LookupJobArgs, opts ...pulumi.InvokeOption) (*LookupJobResult, error)

Gets the resource representation for a job in a project.
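
Example (illustrative): a minimal sketch of fetching a job with LookupJob; the import path, job ID, and region are assumptions.

package main

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1" // import path assumed
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		job, err := dataproc.LookupJob(ctx, &dataproc.LookupJobArgs{
			JobId:  "my-job-id",   // placeholder
			Region: "us-central1", // placeholder
		})
		if err != nil {
			return err
		}
		// Export completion status and the driver output location.
		ctx.Export("jobDone", pulumi.Bool(job.Done))
		ctx.Export("driverOutputUri", pulumi.String(job.DriverOutputResourceUri))
		return nil
	})
}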

type LookupJobResultOutput added in v0.8.0

type LookupJobResultOutput struct{ *pulumi.OutputState }

func LookupJobOutput added in v0.8.0

func LookupJobOutput(ctx *pulumi.Context, args LookupJobOutputArgs, opts ...pulumi.InvokeOption) LookupJobResultOutput

func (LookupJobResultOutput) Done added in v0.8.0

Indicates whether the job is completed. If the value is false, the job is still in progress. If true, the job is completed, and the status.state field will indicate if it was successful, failed, or cancelled.

func (LookupJobResultOutput) DriverControlFilesUri added in v0.8.0

func (o LookupJobResultOutput) DriverControlFilesUri() pulumi.StringOutput

If present, the location of miscellaneous control files which can be used as part of job setup and handling. If not present, control files might be placed in the same location as driver_output_uri.

func (LookupJobResultOutput) DriverOutputResourceUri added in v0.8.0

func (o LookupJobResultOutput) DriverOutputResourceUri() pulumi.StringOutput

A URI pointing to the location of the stdout of the job's driver program.

func (LookupJobResultOutput) DriverSchedulingConfig added in v0.28.0

Optional. Driver scheduling configuration.

func (LookupJobResultOutput) ElementType added in v0.8.0

func (LookupJobResultOutput) ElementType() reflect.Type

func (LookupJobResultOutput) FlinkJob added in v0.32.0

Optional. Job is a Flink job.

func (LookupJobResultOutput) HadoopJob added in v0.8.0

Optional. Job is a Hadoop job.

func (LookupJobResultOutput) HiveJob added in v0.8.0

Optional. Job is a Hive job.

func (LookupJobResultOutput) JobUuid added in v0.8.0

A UUID that uniquely identifies a job within the project over time. This is in contrast to a user-settable reference.job_id that might be reused over time.

func (LookupJobResultOutput) Labels added in v0.8.0

Optional. The labels to associate with this job. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a job.

func (LookupJobResultOutput) PigJob added in v0.8.0

Optional. Job is a Pig job.

func (LookupJobResultOutput) Placement added in v0.8.0

Job information, including how, when, and where to run the job.

func (LookupJobResultOutput) PrestoJob added in v0.8.0

Optional. Job is a Presto job.

func (LookupJobResultOutput) PysparkJob added in v0.8.0

Optional. Job is a PySpark job.

func (LookupJobResultOutput) Reference added in v0.8.0

Optional. The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a job_id.

func (LookupJobResultOutput) Scheduling added in v0.8.0

Optional. Job scheduling configuration.

func (LookupJobResultOutput) SparkJob added in v0.8.0

Optional. Job is a Spark job.

func (LookupJobResultOutput) SparkRJob added in v0.8.0

Optional. Job is a SparkR job.

func (LookupJobResultOutput) SparkSqlJob added in v0.8.0

Optional. Job is a SparkSql job.

func (LookupJobResultOutput) Status added in v0.8.0

The job status. Additional application-specific status information might be contained in the type_job and yarn_applications fields.

func (LookupJobResultOutput) StatusHistory added in v0.8.0

The previous job status.

func (LookupJobResultOutput) ToLookupJobResultOutput added in v0.8.0

func (o LookupJobResultOutput) ToLookupJobResultOutput() LookupJobResultOutput

func (LookupJobResultOutput) ToLookupJobResultOutputWithContext added in v0.8.0

func (o LookupJobResultOutput) ToLookupJobResultOutputWithContext(ctx context.Context) LookupJobResultOutput

func (LookupJobResultOutput) TrinoJob added in v0.26.0

Optional. Job is a Trino job.

func (LookupJobResultOutput) YarnApplications added in v0.8.0

The collection of YARN applications spun up by this job.Beta Feature: This report is available for testing purposes only. It might be changed before final release.

type LookupNodeGroupArgs added in v0.28.0

type LookupNodeGroupArgs struct {
	ClusterId   string  `pulumi:"clusterId"`
	NodeGroupId string  `pulumi:"nodeGroupId"`
	Project     *string `pulumi:"project"`
	RegionId    string  `pulumi:"regionId"`
}

type LookupNodeGroupOutputArgs added in v0.28.0

type LookupNodeGroupOutputArgs struct {
	ClusterId   pulumi.StringInput    `pulumi:"clusterId"`
	NodeGroupId pulumi.StringInput    `pulumi:"nodeGroupId"`
	Project     pulumi.StringPtrInput `pulumi:"project"`
	RegionId    pulumi.StringInput    `pulumi:"regionId"`
}

func (LookupNodeGroupOutputArgs) ElementType added in v0.28.0

func (LookupNodeGroupOutputArgs) ElementType() reflect.Type

type LookupNodeGroupResult added in v0.28.0

type LookupNodeGroupResult struct {
	// Optional. Node group labels. Label keys must consist of 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labels.
	Labels map[string]string `pulumi:"labels"`
	// The Node group resource name (https://aip.dev/122).
	Name string `pulumi:"name"`
	// Optional. The node group instance group configuration.
	NodeGroupConfig InstanceGroupConfigResponse `pulumi:"nodeGroupConfig"`
	// Node group roles.
	Roles []string `pulumi:"roles"`
}

func LookupNodeGroup added in v0.28.0

func LookupNodeGroup(ctx *pulumi.Context, args *LookupNodeGroupArgs, opts ...pulumi.InvokeOption) (*LookupNodeGroupResult, error)

Gets the resource representation for a node group in a cluster.
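A minimal usage sketch, assuming the SDK import path github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1 and placeholder cluster, node group, and region identifiers:

package main

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Placeholder identifiers; substitute a real cluster, node group, and region.
		ng, err := dataproc.LookupNodeGroup(ctx, &dataproc.LookupNodeGroupArgs{
			ClusterId:   "example-cluster",
			NodeGroupId: "example-node-group",
			RegionId:    "us-central1",
		})
		if err != nil {
			return err
		}
		// LookupNodeGroupResult fields are plain Go values.
		ctx.Export("nodeGroupName", pulumi.String(ng.Name))
		ctx.Export("nodeGroupRoles", pulumi.ToStringArray(ng.Roles))
		return nil
	})
}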

type LookupNodeGroupResultOutput added in v0.28.0

type LookupNodeGroupResultOutput struct{ *pulumi.OutputState }

func LookupNodeGroupOutput added in v0.28.0

func (LookupNodeGroupResultOutput) ElementType added in v0.28.0

func (LookupNodeGroupResultOutput) Labels added in v0.28.0

Optional. Node group labels. Label keys must consist of 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labels.

func (LookupNodeGroupResultOutput) Name added in v0.28.0

The Node group resource name (https://aip.dev/122).

func (LookupNodeGroupResultOutput) NodeGroupConfig added in v0.28.0

Optional. The node group instance group configuration.

func (LookupNodeGroupResultOutput) Roles added in v0.28.0

Node group roles.

func (LookupNodeGroupResultOutput) ToLookupNodeGroupResultOutput added in v0.28.0

func (o LookupNodeGroupResultOutput) ToLookupNodeGroupResultOutput() LookupNodeGroupResultOutput

func (LookupNodeGroupResultOutput) ToLookupNodeGroupResultOutputWithContext added in v0.28.0

func (o LookupNodeGroupResultOutput) ToLookupNodeGroupResultOutputWithContext(ctx context.Context) LookupNodeGroupResultOutput

type LookupRegionAutoscalingPolicyIamPolicyArgs added in v0.4.0

type LookupRegionAutoscalingPolicyIamPolicyArgs struct {
	AutoscalingPolicyId string  `pulumi:"autoscalingPolicyId"`
	Project             *string `pulumi:"project"`
	RegionId            string  `pulumi:"regionId"`
}

type LookupRegionAutoscalingPolicyIamPolicyOutputArgs added in v0.8.0

type LookupRegionAutoscalingPolicyIamPolicyOutputArgs struct {
	AutoscalingPolicyId pulumi.StringInput    `pulumi:"autoscalingPolicyId"`
	Project             pulumi.StringPtrInput `pulumi:"project"`
	RegionId            pulumi.StringInput    `pulumi:"regionId"`
}

func (LookupRegionAutoscalingPolicyIamPolicyOutputArgs) ElementType added in v0.8.0

type LookupRegionAutoscalingPolicyIamPolicyResult added in v0.4.0

type LookupRegionAutoscalingPolicyIamPolicyResult struct {
	// Associates a list of members, or principals, with a role. Optionally, may specify a condition that determines how and when the bindings are applied. Each of the bindings must contain at least one principal.The bindings in a Policy can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the bindings grant 50 different roles to user:alice@example.com, and not to any other principal, then you can add another 1,450 principals to the bindings in the Policy.
	Bindings []BindingResponse `pulumi:"bindings"`
	// etag is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the etag in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An etag is returned in the response to getIamPolicy, and systems are expected to put that etag in the request to setIamPolicy to ensure that their change will be applied to the same version of the policy.Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.
	Etag string `pulumi:"etag"`
	// Specifies the format of the policy.Valid values are 0, 1, and 3. Requests that specify an invalid value are rejected.Any operation that affects conditional role bindings must specify version 3. This requirement applies to the following operations: Getting a policy that includes a conditional role binding Adding a conditional role binding to a policy Changing a conditional role binding in a policy Removing any role binding, with or without a condition, from a policy that includes conditionsImportant: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).
	Version int `pulumi:"version"`
}

func LookupRegionAutoscalingPolicyIamPolicy added in v0.4.0

Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.

type LookupRegionAutoscalingPolicyIamPolicyResultOutput added in v0.8.0

type LookupRegionAutoscalingPolicyIamPolicyResultOutput struct{ *pulumi.OutputState }

func (LookupRegionAutoscalingPolicyIamPolicyResultOutput) Bindings added in v0.8.0

Associates a list of members, or principals, with a role. Optionally, may specify a condition that determines how and when the bindings are applied. Each of the bindings must contain at least one principal.The bindings in a Policy can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the bindings grant 50 different roles to user:alice@example.com, and not to any other principal, then you can add another 1,450 principals to the bindings in the Policy.

func (LookupRegionAutoscalingPolicyIamPolicyResultOutput) ElementType added in v0.8.0

func (LookupRegionAutoscalingPolicyIamPolicyResultOutput) Etag added in v0.8.0

etag is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the etag in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An etag is returned in the response to getIamPolicy, and systems are expected to put that etag in the request to setIamPolicy to ensure that their change will be applied to the same version of the policy.Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.

func (LookupRegionAutoscalingPolicyIamPolicyResultOutput) ToLookupRegionAutoscalingPolicyIamPolicyResultOutput added in v0.8.0

func (o LookupRegionAutoscalingPolicyIamPolicyResultOutput) ToLookupRegionAutoscalingPolicyIamPolicyResultOutput() LookupRegionAutoscalingPolicyIamPolicyResultOutput

func (LookupRegionAutoscalingPolicyIamPolicyResultOutput) ToLookupRegionAutoscalingPolicyIamPolicyResultOutputWithContext added in v0.8.0

func (o LookupRegionAutoscalingPolicyIamPolicyResultOutput) ToLookupRegionAutoscalingPolicyIamPolicyResultOutputWithContext(ctx context.Context) LookupRegionAutoscalingPolicyIamPolicyResultOutput

func (LookupRegionAutoscalingPolicyIamPolicyResultOutput) Version added in v0.8.0

Specifies the format of the policy.Valid values are 0, 1, and 3. Requests that specify an invalid value are rejected.Any operation that affects conditional role bindings must specify version 3. This requirement applies to the following operations: Getting a policy that includes a conditional role binding Adding a conditional role binding to a policy Changing a conditional role binding in a policy Removing any role binding, with or without a condition, from a policy that includes conditionsImportant: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).

type LookupRegionClusterIamPolicyArgs added in v0.4.0

type LookupRegionClusterIamPolicyArgs struct {
	ClusterId string  `pulumi:"clusterId"`
	Project   *string `pulumi:"project"`
	RegionId  string  `pulumi:"regionId"`
}

type LookupRegionClusterIamPolicyOutputArgs added in v0.8.0

type LookupRegionClusterIamPolicyOutputArgs struct {
	ClusterId pulumi.StringInput    `pulumi:"clusterId"`
	Project   pulumi.StringPtrInput `pulumi:"project"`
	RegionId  pulumi.StringInput    `pulumi:"regionId"`
}

func (LookupRegionClusterIamPolicyOutputArgs) ElementType added in v0.8.0

type LookupRegionClusterIamPolicyResult added in v0.4.0

type LookupRegionClusterIamPolicyResult struct {
	// Associates a list of members, or principals, with a role. Optionally, may specify a condition that determines how and when the bindings are applied. Each of the bindings must contain at least one principal.The bindings in a Policy can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the bindings grant 50 different roles to user:alice@example.com, and not to any other principal, then you can add another 1,450 principals to the bindings in the Policy.
	Bindings []BindingResponse `pulumi:"bindings"`
	// etag is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the etag in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An etag is returned in the response to getIamPolicy, and systems are expected to put that etag in the request to setIamPolicy to ensure that their change will be applied to the same version of the policy.Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.
	Etag string `pulumi:"etag"`
	// Specifies the format of the policy.Valid values are 0, 1, and 3. Requests that specify an invalid value are rejected.Any operation that affects conditional role bindings must specify version 3. This requirement applies to the following operations: Getting a policy that includes a conditional role binding Adding a conditional role binding to a policy Changing a conditional role binding in a policy Removing any role binding, with or without a condition, from a policy that includes conditionsImportant: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).
	Version int `pulumi:"version"`
}

func LookupRegionClusterIamPolicy added in v0.4.0

func LookupRegionClusterIamPolicy(ctx *pulumi.Context, args *LookupRegionClusterIamPolicyArgs, opts ...pulumi.InvokeOption) (*LookupRegionClusterIamPolicyResult, error)

Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.
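The IAM policy lookups in this package all follow the same pattern; a sketch for LookupRegionClusterIamPolicy (placeholder identifiers, run inside the same pulumi.Run callback as in the LookupNodeGroup example above) that surfaces the policy's etag and version:

policy, err := dataproc.LookupRegionClusterIamPolicy(ctx, &dataproc.LookupRegionClusterIamPolicyArgs{
	ClusterId: "example-cluster",
	RegionId:  "us-central1",
})
if err != nil {
	return err
}
// The etag should accompany any subsequent policy update to avoid overwriting
// concurrent changes, per the etag description below.
ctx.Export("clusterPolicyEtag", pulumi.String(policy.Etag))
ctx.Export("clusterPolicyVersion", pulumi.Int(policy.Version))
ctx.Export("clusterPolicyBindingCount", pulumi.Int(len(policy.Bindings)))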

type LookupRegionClusterIamPolicyResultOutput added in v0.8.0

type LookupRegionClusterIamPolicyResultOutput struct{ *pulumi.OutputState }

func (LookupRegionClusterIamPolicyResultOutput) Bindings added in v0.8.0

Associates a list of members, or principals, with a role. Optionally, may specify a condition that determines how and when the bindings are applied. Each of the bindings must contain at least one principal.The bindings in a Policy can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the bindings grant 50 different roles to user:alice@example.com, and not to any other principal, then you can add another 1,450 principals to the bindings in the Policy.

func (LookupRegionClusterIamPolicyResultOutput) ElementType added in v0.8.0

func (LookupRegionClusterIamPolicyResultOutput) Etag added in v0.8.0

etag is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the etag in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An etag is returned in the response to getIamPolicy, and systems are expected to put that etag in the request to setIamPolicy to ensure that their change will be applied to the same version of the policy.Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.

func (LookupRegionClusterIamPolicyResultOutput) ToLookupRegionClusterIamPolicyResultOutput added in v0.8.0

func (o LookupRegionClusterIamPolicyResultOutput) ToLookupRegionClusterIamPolicyResultOutput() LookupRegionClusterIamPolicyResultOutput

func (LookupRegionClusterIamPolicyResultOutput) ToLookupRegionClusterIamPolicyResultOutputWithContext added in v0.8.0

func (o LookupRegionClusterIamPolicyResultOutput) ToLookupRegionClusterIamPolicyResultOutputWithContext(ctx context.Context) LookupRegionClusterIamPolicyResultOutput

func (LookupRegionClusterIamPolicyResultOutput) Version added in v0.8.0

Specifies the format of the policy.Valid values are 0, 1, and 3. Requests that specify an invalid value are rejected.Any operation that affects conditional role bindings must specify version 3. This requirement applies to the following operations: Getting a policy that includes a conditional role binding Adding a conditional role binding to a policy Changing a conditional role binding in a policy Removing any role binding, with or without a condition, from a policy that includes conditionsImportant: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).

type LookupRegionJobIamPolicyArgs added in v0.4.0

type LookupRegionJobIamPolicyArgs struct {
	JobId    string  `pulumi:"jobId"`
	Project  *string `pulumi:"project"`
	RegionId string  `pulumi:"regionId"`
}

type LookupRegionJobIamPolicyOutputArgs added in v0.8.0

type LookupRegionJobIamPolicyOutputArgs struct {
	JobId    pulumi.StringInput    `pulumi:"jobId"`
	Project  pulumi.StringPtrInput `pulumi:"project"`
	RegionId pulumi.StringInput    `pulumi:"regionId"`
}

func (LookupRegionJobIamPolicyOutputArgs) ElementType added in v0.8.0

type LookupRegionJobIamPolicyResult added in v0.4.0

type LookupRegionJobIamPolicyResult struct {
	// Associates a list of members, or principals, with a role. Optionally, may specify a condition that determines how and when the bindings are applied. Each of the bindings must contain at least one principal.The bindings in a Policy can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the bindings grant 50 different roles to user:alice@example.com, and not to any other principal, then you can add another 1,450 principals to the bindings in the Policy.
	Bindings []BindingResponse `pulumi:"bindings"`
	// etag is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the etag in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An etag is returned in the response to getIamPolicy, and systems are expected to put that etag in the request to setIamPolicy to ensure that their change will be applied to the same version of the policy.Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.
	Etag string `pulumi:"etag"`
	// Specifies the format of the policy.Valid values are 0, 1, and 3. Requests that specify an invalid value are rejected.Any operation that affects conditional role bindings must specify version 3. This requirement applies to the following operations: Getting a policy that includes a conditional role binding Adding a conditional role binding to a policy Changing a conditional role binding in a policy Removing any role binding, with or without a condition, from a policy that includes conditionsImportant: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).
	Version int `pulumi:"version"`
}

func LookupRegionJobIamPolicy added in v0.4.0

func LookupRegionJobIamPolicy(ctx *pulumi.Context, args *LookupRegionJobIamPolicyArgs, opts ...pulumi.InvokeOption) (*LookupRegionJobIamPolicyResult, error)

Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.

type LookupRegionJobIamPolicyResultOutput added in v0.8.0

type LookupRegionJobIamPolicyResultOutput struct{ *pulumi.OutputState }

func (LookupRegionJobIamPolicyResultOutput) Bindings added in v0.8.0

Associates a list of members, or principals, with a role. Optionally, may specify a condition that determines how and when the bindings are applied. Each of the bindings must contain at least one principal.The bindings in a Policy can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the bindings grant 50 different roles to user:alice@example.com, and not to any other principal, then you can add another 1,450 principals to the bindings in the Policy.

func (LookupRegionJobIamPolicyResultOutput) ElementType added in v0.8.0

func (LookupRegionJobIamPolicyResultOutput) Etag added in v0.8.0

etag is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the etag in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An etag is returned in the response to getIamPolicy, and systems are expected to put that etag in the request to setIamPolicy to ensure that their change will be applied to the same version of the policy.Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.

func (LookupRegionJobIamPolicyResultOutput) ToLookupRegionJobIamPolicyResultOutput added in v0.8.0

func (o LookupRegionJobIamPolicyResultOutput) ToLookupRegionJobIamPolicyResultOutput() LookupRegionJobIamPolicyResultOutput

func (LookupRegionJobIamPolicyResultOutput) ToLookupRegionJobIamPolicyResultOutputWithContext added in v0.8.0

func (o LookupRegionJobIamPolicyResultOutput) ToLookupRegionJobIamPolicyResultOutputWithContext(ctx context.Context) LookupRegionJobIamPolicyResultOutput

func (LookupRegionJobIamPolicyResultOutput) Version added in v0.8.0

Specifies the format of the policy.Valid values are 0, 1, and 3. Requests that specify an invalid value are rejected.Any operation that affects conditional role bindings must specify version 3. This requirement applies to the following operations: Getting a policy that includes a conditional role binding Adding a conditional role binding to a policy Changing a conditional role binding in a policy Removing any role binding, with or without a condition, from a policy that includes conditionsImportant: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).

type LookupRegionOperationIamPolicyArgs added in v0.4.0

type LookupRegionOperationIamPolicyArgs struct {
	OperationId string  `pulumi:"operationId"`
	Project     *string `pulumi:"project"`
	RegionId    string  `pulumi:"regionId"`
}

type LookupRegionOperationIamPolicyOutputArgs added in v0.8.0

type LookupRegionOperationIamPolicyOutputArgs struct {
	OperationId pulumi.StringInput    `pulumi:"operationId"`
	Project     pulumi.StringPtrInput `pulumi:"project"`
	RegionId    pulumi.StringInput    `pulumi:"regionId"`
}

func (LookupRegionOperationIamPolicyOutputArgs) ElementType added in v0.8.0

type LookupRegionOperationIamPolicyResult added in v0.4.0

type LookupRegionOperationIamPolicyResult struct {
	// Associates a list of members, or principals, with a role. Optionally, may specify a condition that determines how and when the bindings are applied. Each of the bindings must contain at least one principal.The bindings in a Policy can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the bindings grant 50 different roles to user:alice@example.com, and not to any other principal, then you can add another 1,450 principals to the bindings in the Policy.
	Bindings []BindingResponse `pulumi:"bindings"`
	// etag is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the etag in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An etag is returned in the response to getIamPolicy, and systems are expected to put that etag in the request to setIamPolicy to ensure that their change will be applied to the same version of the policy.Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.
	Etag string `pulumi:"etag"`
	// Specifies the format of the policy.Valid values are 0, 1, and 3. Requests that specify an invalid value are rejected.Any operation that affects conditional role bindings must specify version 3. This requirement applies to the following operations: Getting a policy that includes a conditional role binding Adding a conditional role binding to a policy Changing a conditional role binding in a policy Removing any role binding, with or without a condition, from a policy that includes conditionsImportant: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).
	Version int `pulumi:"version"`
}

func LookupRegionOperationIamPolicy added in v0.4.0

Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.

type LookupRegionOperationIamPolicyResultOutput added in v0.8.0

type LookupRegionOperationIamPolicyResultOutput struct{ *pulumi.OutputState }

func (LookupRegionOperationIamPolicyResultOutput) Bindings added in v0.8.0

Associates a list of members, or principals, with a role. Optionally, may specify a condition that determines how and when the bindings are applied. Each of the bindings must contain at least one principal.The bindings in a Policy can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the bindings grant 50 different roles to user:alice@example.com, and not to any other principal, then you can add another 1,450 principals to the bindings in the Policy.

func (LookupRegionOperationIamPolicyResultOutput) ElementType added in v0.8.0

func (LookupRegionOperationIamPolicyResultOutput) Etag added in v0.8.0

etag is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the etag in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An etag is returned in the response to getIamPolicy, and systems are expected to put that etag in the request to setIamPolicy to ensure that their change will be applied to the same version of the policy.Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.

func (LookupRegionOperationIamPolicyResultOutput) ToLookupRegionOperationIamPolicyResultOutput added in v0.8.0

func (o LookupRegionOperationIamPolicyResultOutput) ToLookupRegionOperationIamPolicyResultOutput() LookupRegionOperationIamPolicyResultOutput

func (LookupRegionOperationIamPolicyResultOutput) ToLookupRegionOperationIamPolicyResultOutputWithContext added in v0.8.0

func (o LookupRegionOperationIamPolicyResultOutput) ToLookupRegionOperationIamPolicyResultOutputWithContext(ctx context.Context) LookupRegionOperationIamPolicyResultOutput

func (LookupRegionOperationIamPolicyResultOutput) Version added in v0.8.0

Specifies the format of the policy.Valid values are 0, 1, and 3. Requests that specify an invalid value are rejected.Any operation that affects conditional role bindings must specify version 3. This requirement applies to the following operations: Getting a policy that includes a conditional role binding Adding a conditional role binding to a policy Changing a conditional role binding in a policy Removing any role binding, with or without a condition, from a policy that includes conditionsImportant: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).

type LookupRegionWorkflowTemplateIamPolicyArgs added in v0.4.0

type LookupRegionWorkflowTemplateIamPolicyArgs struct {
	Project            *string `pulumi:"project"`
	RegionId           string  `pulumi:"regionId"`
	WorkflowTemplateId string  `pulumi:"workflowTemplateId"`
}

type LookupRegionWorkflowTemplateIamPolicyOutputArgs added in v0.8.0

type LookupRegionWorkflowTemplateIamPolicyOutputArgs struct {
	Project            pulumi.StringPtrInput `pulumi:"project"`
	RegionId           pulumi.StringInput    `pulumi:"regionId"`
	WorkflowTemplateId pulumi.StringInput    `pulumi:"workflowTemplateId"`
}

func (LookupRegionWorkflowTemplateIamPolicyOutputArgs) ElementType added in v0.8.0

type LookupRegionWorkflowTemplateIamPolicyResult added in v0.4.0

type LookupRegionWorkflowTemplateIamPolicyResult struct {
	// Associates a list of members, or principals, with a role. Optionally, may specify a condition that determines how and when the bindings are applied. Each of the bindings must contain at least one principal.The bindings in a Policy can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the bindings grant 50 different roles to user:alice@example.com, and not to any other principal, then you can add another 1,450 principals to the bindings in the Policy.
	Bindings []BindingResponse `pulumi:"bindings"`
	// etag is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the etag in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An etag is returned in the response to getIamPolicy, and systems are expected to put that etag in the request to setIamPolicy to ensure that their change will be applied to the same version of the policy.Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.
	Etag string `pulumi:"etag"`
	// Specifies the format of the policy.Valid values are 0, 1, and 3. Requests that specify an invalid value are rejected.Any operation that affects conditional role bindings must specify version 3. This requirement applies to the following operations: Getting a policy that includes a conditional role binding Adding a conditional role binding to a policy Changing a conditional role binding in a policy Removing any role binding, with or without a condition, from a policy that includes conditionsImportant: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).
	Version int `pulumi:"version"`
}

func LookupRegionWorkflowTemplateIamPolicy added in v0.4.0

Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.

type LookupRegionWorkflowTemplateIamPolicyResultOutput added in v0.8.0

type LookupRegionWorkflowTemplateIamPolicyResultOutput struct{ *pulumi.OutputState }

func (LookupRegionWorkflowTemplateIamPolicyResultOutput) Bindings added in v0.8.0

Associates a list of members, or principals, with a role. Optionally, may specify a condition that determines how and when the bindings are applied. Each of the bindings must contain at least one principal.The bindings in a Policy can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the bindings grant 50 different roles to user:alice@example.com, and not to any other principal, then you can add another 1,450 principals to the bindings in the Policy.

func (LookupRegionWorkflowTemplateIamPolicyResultOutput) ElementType added in v0.8.0

func (LookupRegionWorkflowTemplateIamPolicyResultOutput) Etag added in v0.8.0

etag is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the etag in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An etag is returned in the response to getIamPolicy, and systems are expected to put that etag in the request to setIamPolicy to ensure that their change will be applied to the same version of the policy.Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.

func (LookupRegionWorkflowTemplateIamPolicyResultOutput) ToLookupRegionWorkflowTemplateIamPolicyResultOutput added in v0.8.0

func (o LookupRegionWorkflowTemplateIamPolicyResultOutput) ToLookupRegionWorkflowTemplateIamPolicyResultOutput() LookupRegionWorkflowTemplateIamPolicyResultOutput

func (LookupRegionWorkflowTemplateIamPolicyResultOutput) ToLookupRegionWorkflowTemplateIamPolicyResultOutputWithContext added in v0.8.0

func (o LookupRegionWorkflowTemplateIamPolicyResultOutput) ToLookupRegionWorkflowTemplateIamPolicyResultOutputWithContext(ctx context.Context) LookupRegionWorkflowTemplateIamPolicyResultOutput

func (LookupRegionWorkflowTemplateIamPolicyResultOutput) Version added in v0.8.0

Specifies the format of the policy.Valid values are 0, 1, and 3. Requests that specify an invalid value are rejected.Any operation that affects conditional role bindings must specify version 3. This requirement applies to the following operations: Getting a policy that includes a conditional role binding Adding a conditional role binding to a policy Changing a conditional role binding in a policy Removing any role binding, with or without a condition, from a policy that includes conditionsImportant: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).

type LookupSessionArgs added in v0.32.0

type LookupSessionArgs struct {
	Location  string  `pulumi:"location"`
	Project   *string `pulumi:"project"`
	SessionId string  `pulumi:"sessionId"`
}

type LookupSessionOutputArgs added in v0.32.0

type LookupSessionOutputArgs struct {
	Location  pulumi.StringInput    `pulumi:"location"`
	Project   pulumi.StringPtrInput `pulumi:"project"`
	SessionId pulumi.StringInput    `pulumi:"sessionId"`
}

func (LookupSessionOutputArgs) ElementType added in v0.32.0

func (LookupSessionOutputArgs) ElementType() reflect.Type

type LookupSessionResult added in v0.32.0

type LookupSessionResult struct {
	// The time when the session was created.
	CreateTime string `pulumi:"createTime"`
	// The email address of the user who created the session.
	Creator string `pulumi:"creator"`
	// Optional. Environment configuration for the session execution.
	EnvironmentConfig EnvironmentConfigResponse `pulumi:"environmentConfig"`
	// Optional. Jupyter session config.
	JupyterSession JupyterConfigResponse `pulumi:"jupyterSession"`
	// Optional. The labels to associate with the session. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a session.
	Labels map[string]string `pulumi:"labels"`
	// The resource name of the session.
	Name string `pulumi:"name"`
	// Optional. Runtime configuration for the session execution.
	RuntimeConfig RuntimeConfigResponse `pulumi:"runtimeConfig"`
	// Runtime information about session execution.
	RuntimeInfo RuntimeInfoResponse `pulumi:"runtimeInfo"`
	// Optional. The session template used by the session.Only resource names, including project ID and location, are valid.Example: * https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/sessionTemplates/[template_id] * projects/[project_id]/locations/[dataproc_region]/sessionTemplates/[template_id]The template must be in the same project and Dataproc region as the session.
	SessionTemplate string `pulumi:"sessionTemplate"`
	// A state of the session.
	State string `pulumi:"state"`
	// Historical state information for the session.
	StateHistory []SessionStateHistoryResponse `pulumi:"stateHistory"`
	// Session state details, such as the failure description if the state is FAILED.
	StateMessage string `pulumi:"stateMessage"`
	// The time when the session entered the current state.
	StateTime string `pulumi:"stateTime"`
	// Optional. The email address of the user who owns the session.
	User string `pulumi:"user"`
	// A session UUID (Unique Universal Identifier). The service generates this value when it creates the session.
	Uuid string `pulumi:"uuid"`
}

func LookupSession added in v0.32.0

func LookupSession(ctx *pulumi.Context, args *LookupSessionArgs, opts ...pulumi.InvokeOption) (*LookupSessionResult, error)

Gets the resource representation for an interactive session.
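A sketch of the plain invoke form, inside a pulumi.Run callback and with placeholder location and session ID:

sess, err := dataproc.LookupSession(ctx, &dataproc.LookupSessionArgs{
	Location:  "us-central1",
	SessionId: "example-session",
})
if err != nil {
	return err
}
ctx.Export("sessionState", pulumi.String(sess.State))
ctx.Export("sessionCreator", pulumi.String(sess.Creator))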

type LookupSessionResultOutput added in v0.32.0

type LookupSessionResultOutput struct{ *pulumi.OutputState }

func LookupSessionOutput added in v0.32.0

func LookupSessionOutput(ctx *pulumi.Context, args LookupSessionOutputArgs, opts ...pulumi.InvokeOption) LookupSessionResultOutput
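The Output-returning variant accepts Pulumi inputs, which is useful when the session ID comes from another resource's output rather than a literal; a sketch with hypothetical values:

out := dataproc.LookupSessionOutput(ctx, dataproc.LookupSessionOutputArgs{
	Location:  pulumi.String("us-central1"),
	SessionId: pulumi.String("example-session"), // could instead be another resource's StringOutput
})
// Fields are read through the accessor methods on LookupSessionResultOutput.
ctx.Export("sessionUuid", out.Uuid())
ctx.Export("sessionStateTime", out.StateTime())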

func (LookupSessionResultOutput) CreateTime added in v0.32.0

The time when the session was created.

func (LookupSessionResultOutput) Creator added in v0.32.0

The email address of the user who created the session.

func (LookupSessionResultOutput) ElementType added in v0.32.0

func (LookupSessionResultOutput) ElementType() reflect.Type

func (LookupSessionResultOutput) EnvironmentConfig added in v0.32.0

Optional. Environment configuration for the session execution.

func (LookupSessionResultOutput) JupyterSession added in v0.32.0

Optional. Jupyter session config.

func (LookupSessionResultOutput) Labels added in v0.32.0

Optional. The labels to associate with the session. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a session.

func (LookupSessionResultOutput) Name added in v0.32.0

The resource name of the session.

func (LookupSessionResultOutput) RuntimeConfig added in v0.32.0

Optional. Runtime configuration for the session execution.

func (LookupSessionResultOutput) RuntimeInfo added in v0.32.0

Runtime information about session execution.

func (LookupSessionResultOutput) SessionTemplate added in v0.32.0

func (o LookupSessionResultOutput) SessionTemplate() pulumi.StringOutput

Optional. The session template used by the session.Only resource names, including project ID and location, are valid.Example: * https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/sessionTemplates/[template_id] * projects/[project_id]/locations/[dataproc_region]/sessionTemplates/[template_id]The template must be in the same project and Dataproc region as the session.

func (LookupSessionResultOutput) State added in v0.32.0

A state of the session.

func (LookupSessionResultOutput) StateHistory added in v0.32.0

Historical state information for the session.

func (LookupSessionResultOutput) StateMessage added in v0.32.0

Session state details, such as the failure description if the state is FAILED.

func (LookupSessionResultOutput) StateTime added in v0.32.0

The time when the session entered the current state.

func (LookupSessionResultOutput) ToLookupSessionResultOutput added in v0.32.0

func (o LookupSessionResultOutput) ToLookupSessionResultOutput() LookupSessionResultOutput

func (LookupSessionResultOutput) ToLookupSessionResultOutputWithContext added in v0.32.0

func (o LookupSessionResultOutput) ToLookupSessionResultOutputWithContext(ctx context.Context) LookupSessionResultOutput

func (LookupSessionResultOutput) User added in v0.32.0

Optional. The email address of the user who owns the session.

func (LookupSessionResultOutput) Uuid added in v0.32.0

A session UUID (Unique Universal Identifier). The service generates this value when it creates the session.

type LookupSessionTemplateArgs added in v0.32.0

type LookupSessionTemplateArgs struct {
	Location          string  `pulumi:"location"`
	Project           *string `pulumi:"project"`
	SessionTemplateId string  `pulumi:"sessionTemplateId"`
}

type LookupSessionTemplateOutputArgs added in v0.32.0

type LookupSessionTemplateOutputArgs struct {
	Location          pulumi.StringInput    `pulumi:"location"`
	Project           pulumi.StringPtrInput `pulumi:"project"`
	SessionTemplateId pulumi.StringInput    `pulumi:"sessionTemplateId"`
}

func (LookupSessionTemplateOutputArgs) ElementType added in v0.32.0

type LookupSessionTemplateResult added in v0.32.0

type LookupSessionTemplateResult struct {
	// The time when the template was created.
	CreateTime string `pulumi:"createTime"`
	// The email address of the user who created the template.
	Creator string `pulumi:"creator"`
	// Optional. Brief description of the template.
	Description string `pulumi:"description"`
	// Optional. Environment configuration for session execution.
	EnvironmentConfig EnvironmentConfigResponse `pulumi:"environmentConfig"`
	// Optional. Jupyter session config.
	JupyterSession JupyterConfigResponse `pulumi:"jupyterSession"`
	// Optional. Labels to associate with sessions created using this template. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty, but, if present, must contain 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a session.
	Labels map[string]string `pulumi:"labels"`
	// The resource name of the session template.
	Name string `pulumi:"name"`
	// Optional. Runtime configuration for session execution.
	RuntimeConfig RuntimeConfigResponse `pulumi:"runtimeConfig"`
	// The time the template was last updated.
	UpdateTime string `pulumi:"updateTime"`
	// A session template UUID (Unique Universal Identifier). The service generates this value when it creates the session template.
	Uuid string `pulumi:"uuid"`
}

func LookupSessionTemplate added in v0.32.0

func LookupSessionTemplate(ctx *pulumi.Context, args *LookupSessionTemplateArgs, opts ...pulumi.InvokeOption) (*LookupSessionTemplateResult, error)

Gets the resource representation for a session template.
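A sketch of fetching a session template, again inside a pulumi.Run callback and with placeholder identifiers:

tmpl, err := dataproc.LookupSessionTemplate(ctx, &dataproc.LookupSessionTemplateArgs{
	Location:          "us-central1",
	SessionTemplateId: "example-session-template",
})
if err != nil {
	return err
}
ctx.Export("sessionTemplateName", pulumi.String(tmpl.Name))
ctx.Export("sessionTemplateUpdateTime", pulumi.String(tmpl.UpdateTime))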

type LookupSessionTemplateResultOutput added in v0.32.0

type LookupSessionTemplateResultOutput struct{ *pulumi.OutputState }

func LookupSessionTemplateOutput added in v0.32.0

func (LookupSessionTemplateResultOutput) CreateTime added in v0.32.0

The time when the template was created.

func (LookupSessionTemplateResultOutput) Creator added in v0.32.0

The email address of the user who created the template.

func (LookupSessionTemplateResultOutput) Description added in v0.32.0

Optional. Brief description of the template.

func (LookupSessionTemplateResultOutput) ElementType added in v0.32.0

func (LookupSessionTemplateResultOutput) EnvironmentConfig added in v0.32.0

Optional. Environment configuration for session execution.

func (LookupSessionTemplateResultOutput) JupyterSession added in v0.32.0

Optional. Jupyter session config.

func (LookupSessionTemplateResultOutput) Labels added in v0.32.0

Optional. Labels to associate with sessions created using this template. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty, but, if present, must contain 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a session.

func (LookupSessionTemplateResultOutput) Name added in v0.32.0

The resource name of the session template.

func (LookupSessionTemplateResultOutput) RuntimeConfig added in v0.32.0

Optional. Runtime configuration for session execution.

func (LookupSessionTemplateResultOutput) ToLookupSessionTemplateResultOutput added in v0.32.0

func (o LookupSessionTemplateResultOutput) ToLookupSessionTemplateResultOutput() LookupSessionTemplateResultOutput

func (LookupSessionTemplateResultOutput) ToLookupSessionTemplateResultOutputWithContext added in v0.32.0

func (o LookupSessionTemplateResultOutput) ToLookupSessionTemplateResultOutputWithContext(ctx context.Context) LookupSessionTemplateResultOutput

func (LookupSessionTemplateResultOutput) UpdateTime added in v0.32.0

The time the template was last updated.

func (LookupSessionTemplateResultOutput) Uuid added in v0.32.0

A session template UUID (Unique Universal Identifier). The service generates this value when it creates the session template.

type LookupWorkflowTemplateArgs added in v0.4.0

type LookupWorkflowTemplateArgs struct {
	Location           string  `pulumi:"location"`
	Project            *string `pulumi:"project"`
	Version            *int    `pulumi:"version"`
	WorkflowTemplateId string  `pulumi:"workflowTemplateId"`
}

type LookupWorkflowTemplateIamPolicyArgs added in v0.4.0

type LookupWorkflowTemplateIamPolicyArgs struct {
	Location           string  `pulumi:"location"`
	Project            *string `pulumi:"project"`
	WorkflowTemplateId string  `pulumi:"workflowTemplateId"`
}

type LookupWorkflowTemplateIamPolicyOutputArgs added in v0.8.0

type LookupWorkflowTemplateIamPolicyOutputArgs struct {
	Location           pulumi.StringInput    `pulumi:"location"`
	Project            pulumi.StringPtrInput `pulumi:"project"`
	WorkflowTemplateId pulumi.StringInput    `pulumi:"workflowTemplateId"`
}

func (LookupWorkflowTemplateIamPolicyOutputArgs) ElementType added in v0.8.0

type LookupWorkflowTemplateIamPolicyResult added in v0.4.0

type LookupWorkflowTemplateIamPolicyResult struct {
	// Associates a list of members, or principals, with a role. Optionally, may specify a condition that determines how and when the bindings are applied. Each of the bindings must contain at least one principal.The bindings in a Policy can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the bindings grant 50 different roles to user:alice@example.com, and not to any other principal, then you can add another 1,450 principals to the bindings in the Policy.
	Bindings []BindingResponse `pulumi:"bindings"`
	// etag is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the etag in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An etag is returned in the response to getIamPolicy, and systems are expected to put that etag in the request to setIamPolicy to ensure that their change will be applied to the same version of the policy.Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.
	Etag string `pulumi:"etag"`
	// Specifies the format of the policy.Valid values are 0, 1, and 3. Requests that specify an invalid value are rejected.Any operation that affects conditional role bindings must specify version 3. This requirement applies to the following operations: Getting a policy that includes a conditional role binding Adding a conditional role binding to a policy Changing a conditional role binding in a policy Removing any role binding, with or without a condition, from a policy that includes conditionsImportant: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).
	Version int `pulumi:"version"`
}

func LookupWorkflowTemplateIamPolicy added in v0.4.0

Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.

type LookupWorkflowTemplateIamPolicyResultOutput added in v0.8.0

type LookupWorkflowTemplateIamPolicyResultOutput struct{ *pulumi.OutputState }

func (LookupWorkflowTemplateIamPolicyResultOutput) Bindings added in v0.8.0

Associates a list of members, or principals, with a role. Optionally, may specify a condition that determines how and when the bindings are applied. Each of the bindings must contain at least one principal.The bindings in a Policy can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the bindings grant 50 different roles to user:alice@example.com, and not to any other principal, then you can add another 1,450 principals to the bindings in the Policy.

func (LookupWorkflowTemplateIamPolicyResultOutput) ElementType added in v0.8.0

func (LookupWorkflowTemplateIamPolicyResultOutput) Etag added in v0.8.0

etag is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the etag in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An etag is returned in the response to getIamPolicy, and systems are expected to put that etag in the request to setIamPolicy to ensure that their change will be applied to the same version of the policy.Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.

func (LookupWorkflowTemplateIamPolicyResultOutput) ToLookupWorkflowTemplateIamPolicyResultOutput added in v0.8.0

func (o LookupWorkflowTemplateIamPolicyResultOutput) ToLookupWorkflowTemplateIamPolicyResultOutput() LookupWorkflowTemplateIamPolicyResultOutput

func (LookupWorkflowTemplateIamPolicyResultOutput) ToLookupWorkflowTemplateIamPolicyResultOutputWithContext added in v0.8.0

func (o LookupWorkflowTemplateIamPolicyResultOutput) ToLookupWorkflowTemplateIamPolicyResultOutputWithContext(ctx context.Context) LookupWorkflowTemplateIamPolicyResultOutput

func (LookupWorkflowTemplateIamPolicyResultOutput) Version added in v0.8.0

Specifies the format of the policy.Valid values are 0, 1, and 3. Requests that specify an invalid value are rejected.Any operation that affects conditional role bindings must specify version 3. This requirement applies to the following operations: Getting a policy that includes a conditional role binding Adding a conditional role binding to a policy Changing a conditional role binding in a policy Removing any role binding, with or without a condition, from a policy that includes conditionsImportant: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).

type LookupWorkflowTemplateOutputArgs added in v0.8.0

type LookupWorkflowTemplateOutputArgs struct {
	Location           pulumi.StringInput    `pulumi:"location"`
	Project            pulumi.StringPtrInput `pulumi:"project"`
	Version            pulumi.IntPtrInput    `pulumi:"version"`
	WorkflowTemplateId pulumi.StringInput    `pulumi:"workflowTemplateId"`
}

func (LookupWorkflowTemplateOutputArgs) ElementType added in v0.8.0

type LookupWorkflowTemplateResult added in v0.4.0

type LookupWorkflowTemplateResult struct {
	// The time template was created.
	CreateTime string `pulumi:"createTime"`
	// Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted.
	DagTimeout string `pulumi:"dagTimeout"`
	// Optional. Encryption settings for encrypting customer core content.
	EncryptionConfig GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigResponse `pulumi:"encryptionConfig"`
	// The Directed Acyclic Graph of Jobs to submit.
	Jobs []OrderedJobResponse `pulumi:"jobs"`
	// Optional. The labels to associate with this template. These labels will be propagated to all jobs and clusters created by the workflow instance.Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt).Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt).No more than 32 labels can be associated with a template.
	Labels map[string]string `pulumi:"labels"`
	// The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}
	Name string `pulumi:"name"`
	// Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated.
	Parameters []TemplateParameterResponse `pulumi:"parameters"`
	// WorkflowTemplate scheduling information.
	Placement WorkflowTemplatePlacementResponse `pulumi:"placement"`
	// The time template was last updated.
	UpdateTime string `pulumi:"updateTime"`
	// Optional. Used to perform a consistent read-modify-write.This field should be left blank for a CreateWorkflowTemplate request. It is required for an UpdateWorkflowTemplate request, and must match the current server version. A typical update template flow would fetch the current template with a GetWorkflowTemplate request, which will return the current template with the version field filled in with the current server version. The user updates other fields in the template, then returns it as part of the UpdateWorkflowTemplate request.
	Version int `pulumi:"version"`
}

func LookupWorkflowTemplate added in v0.4.0

func LookupWorkflowTemplate(ctx *pulumi.Context, args *LookupWorkflowTemplateArgs, opts ...pulumi.InvokeOption) (*LookupWorkflowTemplateResult, error)

Retrieves the latest workflow template. A previously instantiated template can be retrieved by specifying the optional version parameter.
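
Example (a minimal sketch, not an official sample): invoking LookupWorkflowTemplate inside a Pulumi program. The import path, the "dataproc" alias, and the field names of the plain LookupWorkflowTemplateArgs struct (assumed to mirror LookupWorkflowTemplateOutputArgs below) are assumptions, and the project, location, and template values are placeholders.

	package main

	import (
		dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1" // assumed import path
		"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
	)

	func main() {
		pulumi.Run(func(ctx *pulumi.Context) error {
			// Fetch the latest version of an existing template (placeholder names).
			tmpl, err := dataproc.LookupWorkflowTemplate(ctx, &dataproc.LookupWorkflowTemplateArgs{
				Location:           "us-central1",
				WorkflowTemplateId: "my-template",
			})
			if err != nil {
				return err
			}
			ctx.Export("templateName", pulumi.String(tmpl.Name))
			return nil
		})
	}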

type LookupWorkflowTemplateResultOutput added in v0.8.0

type LookupWorkflowTemplateResultOutput struct{ *pulumi.OutputState }

func LookupWorkflowTemplateOutput added in v0.8.0

func (LookupWorkflowTemplateResultOutput) CreateTime added in v0.8.0

The time template was created.

func (LookupWorkflowTemplateResultOutput) DagTimeout added in v0.8.0

Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted.

func (LookupWorkflowTemplateResultOutput) ElementType added in v0.8.0

func (LookupWorkflowTemplateResultOutput) EncryptionConfig added in v0.32.0

Optional. Encryption settings for encrypting customer core content.

func (LookupWorkflowTemplateResultOutput) Jobs added in v0.8.0

The Directed Acyclic Graph of Jobs to submit.

func (LookupWorkflowTemplateResultOutput) Labels added in v0.8.0

Optional. The labels to associate with this template. These labels will be propagated to all jobs and clusters created by the workflow instance.Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt).Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt).No more than 32 labels can be associated with a template.

func (LookupWorkflowTemplateResultOutput) Name added in v0.8.0

The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}

func (LookupWorkflowTemplateResultOutput) Parameters added in v0.8.0

Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated.

func (LookupWorkflowTemplateResultOutput) Placement added in v0.8.0

WorkflowTemplate scheduling information.

func (LookupWorkflowTemplateResultOutput) ToLookupWorkflowTemplateResultOutput added in v0.8.0

func (o LookupWorkflowTemplateResultOutput) ToLookupWorkflowTemplateResultOutput() LookupWorkflowTemplateResultOutput

func (LookupWorkflowTemplateResultOutput) ToLookupWorkflowTemplateResultOutputWithContext added in v0.8.0

func (o LookupWorkflowTemplateResultOutput) ToLookupWorkflowTemplateResultOutputWithContext(ctx context.Context) LookupWorkflowTemplateResultOutput

func (LookupWorkflowTemplateResultOutput) UpdateTime added in v0.8.0

The time template was last updated.

func (LookupWorkflowTemplateResultOutput) Version added in v0.8.0

Optional. Used to perform a consistent read-modify-write.This field should be left blank for a CreateWorkflowTemplate request. It is required for an UpdateWorkflowTemplate request, and must match the current server version. A typical update template flow would fetch the current template with a GetWorkflowTemplate request, which will return the current template with the version field filled in with the current server version. The user updates other fields in the template, then returns it as part of the UpdateWorkflowTemplate request.

type ManagedCluster

type ManagedCluster struct {
	// The cluster name prefix. A unique cluster name will be formed by appending a random suffix.The name must contain only lower-case letters (a-z), numbers (0-9), and hyphens (-). Must begin with a letter. Cannot begin or end with hyphen. Must consist of between 2 and 35 characters.
	ClusterName string `pulumi:"clusterName"`
	// The cluster configuration.
	Config ClusterConfig `pulumi:"config"`
	// Optional. The labels to associate with this cluster.Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: \p{Ll}\p{Lo}{0,62}Label values must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}No more than 32 labels can be associated with a given cluster.
	Labels map[string]string `pulumi:"labels"`
}

Cluster that is managed by the workflow.

type ManagedClusterArgs

type ManagedClusterArgs struct {
	// The cluster name prefix. A unique cluster name will be formed by appending a random suffix.The name must contain only lower-case letters (a-z), numbers (0-9), and hyphens (-). Must begin with a letter. Cannot begin or end with hyphen. Must consist of between 2 and 35 characters.
	ClusterName pulumi.StringInput `pulumi:"clusterName"`
	// The cluster configuration.
	Config ClusterConfigInput `pulumi:"config"`
	// Optional. The labels to associate with this cluster.Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: \p{Ll}\p{Lo}{0,62}Label values must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}No more than 32 labels can be associated with a given cluster.
	Labels pulumi.StringMapInput `pulumi:"labels"`
}

Cluster that is managed by the workflow.
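
As a hedged sketch (assuming the same "dataproc" import alias as the lookup example above, and an empty ClusterConfigArgs standing in for a real cluster configuration), a ManagedClusterArgs value might be built like this inside a Pulumi program:

	placementCluster := dataproc.ManagedClusterArgs{
		// The final cluster name is this prefix plus a random suffix.
		ClusterName: pulumi.String("wf-cluster"),
		// Real templates would populate the cluster configuration here.
		Config: dataproc.ClusterConfigArgs{},
		Labels: pulumi.StringMap{
			"env": pulumi.String("dev"),
		},
	}
	_ = placementCluster // typically supplied as the managed-cluster half of a template's placement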

func (ManagedClusterArgs) ElementType

func (ManagedClusterArgs) ElementType() reflect.Type

func (ManagedClusterArgs) ToManagedClusterOutput

func (i ManagedClusterArgs) ToManagedClusterOutput() ManagedClusterOutput

func (ManagedClusterArgs) ToManagedClusterOutputWithContext

func (i ManagedClusterArgs) ToManagedClusterOutputWithContext(ctx context.Context) ManagedClusterOutput

func (ManagedClusterArgs) ToManagedClusterPtrOutput

func (i ManagedClusterArgs) ToManagedClusterPtrOutput() ManagedClusterPtrOutput

func (ManagedClusterArgs) ToManagedClusterPtrOutputWithContext

func (i ManagedClusterArgs) ToManagedClusterPtrOutputWithContext(ctx context.Context) ManagedClusterPtrOutput

type ManagedClusterInput

type ManagedClusterInput interface {
	pulumi.Input

	ToManagedClusterOutput() ManagedClusterOutput
	ToManagedClusterOutputWithContext(context.Context) ManagedClusterOutput
}

ManagedClusterInput is an input type that accepts ManagedClusterArgs and ManagedClusterOutput values. You can construct a concrete instance of `ManagedClusterInput` via:

ManagedClusterArgs{...}

type ManagedClusterOutput

type ManagedClusterOutput struct{ *pulumi.OutputState }

Cluster that is managed by the workflow.

func (ManagedClusterOutput) ClusterName

func (o ManagedClusterOutput) ClusterName() pulumi.StringOutput

The cluster name prefix. A unique cluster name will be formed by appending a random suffix.The name must contain only lower-case letters (a-z), numbers (0-9), and hyphens (-). Must begin with a letter. Cannot begin or end with hyphen. Must consist of between 2 and 35 characters.

func (ManagedClusterOutput) Config

The cluster configuration.

func (ManagedClusterOutput) ElementType

func (ManagedClusterOutput) ElementType() reflect.Type

func (ManagedClusterOutput) Labels

Optional. The labels to associate with this cluster.Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: \p{Ll}\p{Lo}{0,62}Label values must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}No more than 32 labels can be associated with a given cluster.

func (ManagedClusterOutput) ToManagedClusterOutput

func (o ManagedClusterOutput) ToManagedClusterOutput() ManagedClusterOutput

func (ManagedClusterOutput) ToManagedClusterOutputWithContext

func (o ManagedClusterOutput) ToManagedClusterOutputWithContext(ctx context.Context) ManagedClusterOutput

func (ManagedClusterOutput) ToManagedClusterPtrOutput

func (o ManagedClusterOutput) ToManagedClusterPtrOutput() ManagedClusterPtrOutput

func (ManagedClusterOutput) ToManagedClusterPtrOutputWithContext

func (o ManagedClusterOutput) ToManagedClusterPtrOutputWithContext(ctx context.Context) ManagedClusterPtrOutput

type ManagedClusterPtrInput

type ManagedClusterPtrInput interface {
	pulumi.Input

	ToManagedClusterPtrOutput() ManagedClusterPtrOutput
	ToManagedClusterPtrOutputWithContext(context.Context) ManagedClusterPtrOutput
}

ManagedClusterPtrInput is an input type that accepts ManagedClusterArgs, ManagedClusterPtr and ManagedClusterPtrOutput values. You can construct a concrete instance of `ManagedClusterPtrInput` via:

        ManagedClusterArgs{...}

or:

        nil

type ManagedClusterPtrOutput

type ManagedClusterPtrOutput struct{ *pulumi.OutputState }

func (ManagedClusterPtrOutput) ClusterName

The cluster name prefix. A unique cluster name will be formed by appending a random suffix.The name must contain only lower-case letters (a-z), numbers (0-9), and hyphens (-). Must begin with a letter. Cannot begin or end with hyphen. Must consist of between 2 and 35 characters.

func (ManagedClusterPtrOutput) Config

The cluster configuration.

func (ManagedClusterPtrOutput) Elem

func (ManagedClusterPtrOutput) ElementType

func (ManagedClusterPtrOutput) ElementType() reflect.Type

func (ManagedClusterPtrOutput) Labels

Optional. The labels to associate with this cluster.Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: \p{Ll}\p{Lo}{0,62}Label values must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}No more than 32 labels can be associated with a given cluster.

func (ManagedClusterPtrOutput) ToManagedClusterPtrOutput

func (o ManagedClusterPtrOutput) ToManagedClusterPtrOutput() ManagedClusterPtrOutput

func (ManagedClusterPtrOutput) ToManagedClusterPtrOutputWithContext

func (o ManagedClusterPtrOutput) ToManagedClusterPtrOutputWithContext(ctx context.Context) ManagedClusterPtrOutput

type ManagedClusterResponse

type ManagedClusterResponse struct {
	// The cluster name prefix. A unique cluster name will be formed by appending a random suffix.The name must contain only lower-case letters (a-z), numbers (0-9), and hyphens (-). Must begin with a letter. Cannot begin or end with hyphen. Must consist of between 2 and 35 characters.
	ClusterName string `pulumi:"clusterName"`
	// The cluster configuration.
	Config ClusterConfigResponse `pulumi:"config"`
	// Optional. The labels to associate with this cluster.Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: \p{Ll}\p{Lo}{0,62}Label values must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}No more than 32 labels can be associated with a given cluster.
	Labels map[string]string `pulumi:"labels"`
}

Cluster that is managed by the workflow.

type ManagedClusterResponseOutput

type ManagedClusterResponseOutput struct{ *pulumi.OutputState }

Cluster that is managed by the workflow.

func (ManagedClusterResponseOutput) ClusterName

The cluster name prefix. A unique cluster name will be formed by appending a random suffix.The name must contain only lower-case letters (a-z), numbers (0-9), and hyphens (-). Must begin with a letter. Cannot begin or end with hyphen. Must consist of between 2 and 35 characters.

func (ManagedClusterResponseOutput) Config

The cluster configuration.

func (ManagedClusterResponseOutput) ElementType

func (ManagedClusterResponseOutput) Labels

Optional. The labels to associate with this cluster.Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: \p{Ll}\p{Lo}{0,62}Label values must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}No more than 32 labels can be associated with a given cluster.

func (ManagedClusterResponseOutput) ToManagedClusterResponseOutput

func (o ManagedClusterResponseOutput) ToManagedClusterResponseOutput() ManagedClusterResponseOutput

func (ManagedClusterResponseOutput) ToManagedClusterResponseOutputWithContext

func (o ManagedClusterResponseOutput) ToManagedClusterResponseOutputWithContext(ctx context.Context) ManagedClusterResponseOutput

type ManagedGroupConfigResponse

type ManagedGroupConfigResponse struct {
	// The name of the Instance Group Manager for this group.
	InstanceGroupManagerName string `pulumi:"instanceGroupManagerName"`
	// The partial URI to the instance group manager for this group. E.g. projects/my-project/regions/us-central1/instanceGroupManagers/my-igm.
	InstanceGroupManagerUri string `pulumi:"instanceGroupManagerUri"`
	// The name of the Instance Template used for the Managed Instance Group.
	InstanceTemplateName string `pulumi:"instanceTemplateName"`
}

Specifies the resources used to actively manage an instance group.

type ManagedGroupConfigResponseOutput

type ManagedGroupConfigResponseOutput struct{ *pulumi.OutputState }

Specifies the resources used to actively manage an instance group.

func (ManagedGroupConfigResponseOutput) ElementType

func (ManagedGroupConfigResponseOutput) InstanceGroupManagerName

func (o ManagedGroupConfigResponseOutput) InstanceGroupManagerName() pulumi.StringOutput

The name of the Instance Group Manager for this group.

func (ManagedGroupConfigResponseOutput) InstanceGroupManagerUri added in v0.32.0

func (o ManagedGroupConfigResponseOutput) InstanceGroupManagerUri() pulumi.StringOutput

The partial URI to the instance group manager for this group. E.g. projects/my-project/regions/us-central1/instanceGroupManagers/my-igm.

func (ManagedGroupConfigResponseOutput) InstanceTemplateName

func (o ManagedGroupConfigResponseOutput) InstanceTemplateName() pulumi.StringOutput

The name of the Instance Template used for the Managed Instance Group.

func (ManagedGroupConfigResponseOutput) ToManagedGroupConfigResponseOutput

func (o ManagedGroupConfigResponseOutput) ToManagedGroupConfigResponseOutput() ManagedGroupConfigResponseOutput

func (ManagedGroupConfigResponseOutput) ToManagedGroupConfigResponseOutputWithContext

func (o ManagedGroupConfigResponseOutput) ToManagedGroupConfigResponseOutputWithContext(ctx context.Context) ManagedGroupConfigResponseOutput

type MetastoreConfig

type MetastoreConfig struct {
	// Resource name of an existing Dataproc Metastore service.Example: projects/[project_id]/locations/[dataproc_region]/services/[service-name]
	DataprocMetastoreService string `pulumi:"dataprocMetastoreService"`
}

Specifies a Metastore configuration.

type MetastoreConfigArgs

type MetastoreConfigArgs struct {
	// Resource name of an existing Dataproc Metastore service.Example: projects/[project_id]/locations/[dataproc_region]/services/[service-name]
	DataprocMetastoreService pulumi.StringInput `pulumi:"dataprocMetastoreService"`
}

Specifies a Metastore configuration.
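
A small sketch of filling in MetastoreConfigArgs (same assumed "dataproc" alias; the project, region, and service names are placeholders):

	metastore := dataproc.MetastoreConfigArgs{
		// Resource name of an existing Dataproc Metastore service.
		DataprocMetastoreService: pulumi.String("projects/my-project/locations/us-central1/services/my-metastore"),
	}
	_ = metastore // typically wired into the cluster configuration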

func (MetastoreConfigArgs) ElementType

func (MetastoreConfigArgs) ElementType() reflect.Type

func (MetastoreConfigArgs) ToMetastoreConfigOutput

func (i MetastoreConfigArgs) ToMetastoreConfigOutput() MetastoreConfigOutput

func (MetastoreConfigArgs) ToMetastoreConfigOutputWithContext

func (i MetastoreConfigArgs) ToMetastoreConfigOutputWithContext(ctx context.Context) MetastoreConfigOutput

func (MetastoreConfigArgs) ToMetastoreConfigPtrOutput

func (i MetastoreConfigArgs) ToMetastoreConfigPtrOutput() MetastoreConfigPtrOutput

func (MetastoreConfigArgs) ToMetastoreConfigPtrOutputWithContext

func (i MetastoreConfigArgs) ToMetastoreConfigPtrOutputWithContext(ctx context.Context) MetastoreConfigPtrOutput

type MetastoreConfigInput

type MetastoreConfigInput interface {
	pulumi.Input

	ToMetastoreConfigOutput() MetastoreConfigOutput
	ToMetastoreConfigOutputWithContext(context.Context) MetastoreConfigOutput
}

MetastoreConfigInput is an input type that accepts MetastoreConfigArgs and MetastoreConfigOutput values. You can construct a concrete instance of `MetastoreConfigInput` via:

MetastoreConfigArgs{...}

type MetastoreConfigOutput

type MetastoreConfigOutput struct{ *pulumi.OutputState }

Specifies a Metastore configuration.

func (MetastoreConfigOutput) DataprocMetastoreService

func (o MetastoreConfigOutput) DataprocMetastoreService() pulumi.StringOutput

Resource name of an existing Dataproc Metastore service.Example: projects/[project_id]/locations/[dataproc_region]/services/[service-name]

func (MetastoreConfigOutput) ElementType

func (MetastoreConfigOutput) ElementType() reflect.Type

func (MetastoreConfigOutput) ToMetastoreConfigOutput

func (o MetastoreConfigOutput) ToMetastoreConfigOutput() MetastoreConfigOutput

func (MetastoreConfigOutput) ToMetastoreConfigOutputWithContext

func (o MetastoreConfigOutput) ToMetastoreConfigOutputWithContext(ctx context.Context) MetastoreConfigOutput

func (MetastoreConfigOutput) ToMetastoreConfigPtrOutput

func (o MetastoreConfigOutput) ToMetastoreConfigPtrOutput() MetastoreConfigPtrOutput

func (MetastoreConfigOutput) ToMetastoreConfigPtrOutputWithContext

func (o MetastoreConfigOutput) ToMetastoreConfigPtrOutputWithContext(ctx context.Context) MetastoreConfigPtrOutput

type MetastoreConfigPtrInput

type MetastoreConfigPtrInput interface {
	pulumi.Input

	ToMetastoreConfigPtrOutput() MetastoreConfigPtrOutput
	ToMetastoreConfigPtrOutputWithContext(context.Context) MetastoreConfigPtrOutput
}

MetastoreConfigPtrInput is an input type that accepts MetastoreConfigArgs, MetastoreConfigPtr and MetastoreConfigPtrOutput values. You can construct a concrete instance of `MetastoreConfigPtrInput` via:

        MetastoreConfigArgs{...}

or:

        nil

type MetastoreConfigPtrOutput

type MetastoreConfigPtrOutput struct{ *pulumi.OutputState }

func (MetastoreConfigPtrOutput) DataprocMetastoreService

func (o MetastoreConfigPtrOutput) DataprocMetastoreService() pulumi.StringPtrOutput

Resource name of an existing Dataproc Metastore service.Example: projects/[project_id]/locations/[dataproc_region]/services/[service-name]

func (MetastoreConfigPtrOutput) Elem

func (MetastoreConfigPtrOutput) ElementType

func (MetastoreConfigPtrOutput) ElementType() reflect.Type

func (MetastoreConfigPtrOutput) ToMetastoreConfigPtrOutput

func (o MetastoreConfigPtrOutput) ToMetastoreConfigPtrOutput() MetastoreConfigPtrOutput

func (MetastoreConfigPtrOutput) ToMetastoreConfigPtrOutputWithContext

func (o MetastoreConfigPtrOutput) ToMetastoreConfigPtrOutputWithContext(ctx context.Context) MetastoreConfigPtrOutput

type MetastoreConfigResponse

type MetastoreConfigResponse struct {
	// Resource name of an existing Dataproc Metastore service.Example: projects/[project_id]/locations/[dataproc_region]/services/[service-name]
	DataprocMetastoreService string `pulumi:"dataprocMetastoreService"`
}

Specifies a Metastore configuration.

type MetastoreConfigResponseOutput

type MetastoreConfigResponseOutput struct{ *pulumi.OutputState }

Specifies a Metastore configuration.

func (MetastoreConfigResponseOutput) DataprocMetastoreService

func (o MetastoreConfigResponseOutput) DataprocMetastoreService() pulumi.StringOutput

Resource name of an existing Dataproc Metastore service.Example: projects/[project_id]/locations/[dataproc_region]/services/[service-name]

func (MetastoreConfigResponseOutput) ElementType

func (MetastoreConfigResponseOutput) ToMetastoreConfigResponseOutput

func (o MetastoreConfigResponseOutput) ToMetastoreConfigResponseOutput() MetastoreConfigResponseOutput

func (MetastoreConfigResponseOutput) ToMetastoreConfigResponseOutputWithContext

func (o MetastoreConfigResponseOutput) ToMetastoreConfigResponseOutputWithContext(ctx context.Context) MetastoreConfigResponseOutput

type Metric added in v0.15.0

type Metric struct {
	// Optional. Specify one or more Custom metrics (https://cloud.google.com/dataproc/docs/guides/dataproc-metrics#custom_metrics) to collect for the metric source (for the SPARK metric source, any Spark metric (https://spark.apache.org/docs/latest/monitoring.html#metrics) can be specified).Provide metrics in the following format: METRIC_SOURCE:INSTANCE:GROUP:METRIC Use camelcase as appropriate.Examples: yarn:ResourceManager:QueueMetrics:AppsCompleted spark:driver:DAGScheduler:job.allJobs sparkHistoryServer:JVM:Memory:NonHeapMemoryUsage.committed hiveserver2:JVM:Memory:NonHeapMemoryUsage.used Notes: Only the specified overridden metrics are collected for the metric source. For example, if one or more spark:executor metrics are listed as metric overrides, other SPARK metrics are not collected. The collection of the metrics for other enabled custom metric sources is unaffected. For example, if both SPARK and YARN metric sources are enabled, and overrides are provided for Spark metrics only, all YARN metrics are collected.
	MetricOverrides []string `pulumi:"metricOverrides"`
	// A standard set of metrics is collected unless metricOverrides are specified for the metric source (see Custom metrics (https://cloud.google.com/dataproc/docs/guides/dataproc-metrics#custom_metrics) for more information).
	MetricSource MetricMetricSource `pulumi:"metricSource"`
}

A Dataproc custom metric.

type MetricArgs added in v0.15.0

type MetricArgs struct {
	// Optional. Specify one or more Custom metrics (https://cloud.google.com/dataproc/docs/guides/dataproc-metrics#custom_metrics) to collect for the metric source (for the SPARK metric source, any Spark metric (https://spark.apache.org/docs/latest/monitoring.html#metrics) can be specified).Provide metrics in the following format: METRIC_SOURCE:INSTANCE:GROUP:METRIC Use camelcase as appropriate.Examples: yarn:ResourceManager:QueueMetrics:AppsCompleted spark:driver:DAGScheduler:job.allJobs sparkHistoryServer:JVM:Memory:NonHeapMemoryUsage.committed hiveserver2:JVM:Memory:NonHeapMemoryUsage.used Notes: Only the specified overridden metrics are collected for the metric source. For example, if one or more spark:executor metrics are listed as metric overrides, other SPARK metrics are not collected. The collection of the metrics for other enabled custom metric sources is unaffected. For example, if both SPARK and YARN metric sources are enabled, and overrides are provided for Spark metrics only, all YARN metrics are collected.
	MetricOverrides pulumi.StringArrayInput `pulumi:"metricOverrides"`
	// A standard set of metrics is collected unless metricOverrides are specified for the metric source (see Custom metrics (https://cloud.google.com/dataproc/docs/guides/dataproc-metrics#custom_metrics) for more information).
	MetricSource MetricMetricSourceInput `pulumi:"metricSource"`
}

A Dataproc custom metric.
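
For illustration (same assumed "dataproc" alias; the override shown is one of the examples from the field description), a MetricArgs value can be built with a string-cast MetricMetricSource, since that enum is a string-backed type:

	customMetric := dataproc.MetricArgs{
		MetricSource: dataproc.MetricMetricSource("SPARK"),
		MetricOverrides: pulumi.StringArray{
			pulumi.String("spark:driver:DAGScheduler:job.allJobs"),
		},
	}
	_ = customMetric // typically collected into a MetricArray on the cluster's metric configuration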

func (MetricArgs) ElementType added in v0.15.0

func (MetricArgs) ElementType() reflect.Type

func (MetricArgs) ToMetricOutput added in v0.15.0

func (i MetricArgs) ToMetricOutput() MetricOutput

func (MetricArgs) ToMetricOutputWithContext added in v0.15.0

func (i MetricArgs) ToMetricOutputWithContext(ctx context.Context) MetricOutput

type MetricArray added in v0.15.0

type MetricArray []MetricInput

func (MetricArray) ElementType added in v0.15.0

func (MetricArray) ElementType() reflect.Type

func (MetricArray) ToMetricArrayOutput added in v0.15.0

func (i MetricArray) ToMetricArrayOutput() MetricArrayOutput

func (MetricArray) ToMetricArrayOutputWithContext added in v0.15.0

func (i MetricArray) ToMetricArrayOutputWithContext(ctx context.Context) MetricArrayOutput

type MetricArrayInput added in v0.15.0

type MetricArrayInput interface {
	pulumi.Input

	ToMetricArrayOutput() MetricArrayOutput
	ToMetricArrayOutputWithContext(context.Context) MetricArrayOutput
}

MetricArrayInput is an input type that accepts MetricArray and MetricArrayOutput values. You can construct a concrete instance of `MetricArrayInput` via:

MetricArray{ MetricArgs{...} }

type MetricArrayOutput added in v0.15.0

type MetricArrayOutput struct{ *pulumi.OutputState }

func (MetricArrayOutput) ElementType added in v0.15.0

func (MetricArrayOutput) ElementType() reflect.Type

func (MetricArrayOutput) Index added in v0.15.0

func (MetricArrayOutput) ToMetricArrayOutput added in v0.15.0

func (o MetricArrayOutput) ToMetricArrayOutput() MetricArrayOutput

func (MetricArrayOutput) ToMetricArrayOutputWithContext added in v0.15.0

func (o MetricArrayOutput) ToMetricArrayOutputWithContext(ctx context.Context) MetricArrayOutput

type MetricInput added in v0.15.0

type MetricInput interface {
	pulumi.Input

	ToMetricOutput() MetricOutput
	ToMetricOutputWithContext(context.Context) MetricOutput
}

MetricInput is an input type that accepts MetricArgs and MetricOutput values. You can construct a concrete instance of `MetricInput` via:

MetricArgs{...}

type MetricMetricSource added in v0.15.0

type MetricMetricSource string

Required. A standard set of metrics is collected unless metricOverrides are specified for the metric source (see Custom metrics (https://cloud.google.com/dataproc/docs/guides/dataproc-metrics#custom_metrics) for more information).

func (MetricMetricSource) ElementType added in v0.15.0

func (MetricMetricSource) ElementType() reflect.Type

func (MetricMetricSource) ToMetricMetricSourceOutput added in v0.15.0

func (e MetricMetricSource) ToMetricMetricSourceOutput() MetricMetricSourceOutput

func (MetricMetricSource) ToMetricMetricSourceOutputWithContext added in v0.15.0

func (e MetricMetricSource) ToMetricMetricSourceOutputWithContext(ctx context.Context) MetricMetricSourceOutput

func (MetricMetricSource) ToMetricMetricSourcePtrOutput added in v0.15.0

func (e MetricMetricSource) ToMetricMetricSourcePtrOutput() MetricMetricSourcePtrOutput

func (MetricMetricSource) ToMetricMetricSourcePtrOutputWithContext added in v0.15.0

func (e MetricMetricSource) ToMetricMetricSourcePtrOutputWithContext(ctx context.Context) MetricMetricSourcePtrOutput

func (MetricMetricSource) ToStringOutput added in v0.15.0

func (e MetricMetricSource) ToStringOutput() pulumi.StringOutput

func (MetricMetricSource) ToStringOutputWithContext added in v0.15.0

func (e MetricMetricSource) ToStringOutputWithContext(ctx context.Context) pulumi.StringOutput

func (MetricMetricSource) ToStringPtrOutput added in v0.15.0

func (e MetricMetricSource) ToStringPtrOutput() pulumi.StringPtrOutput

func (MetricMetricSource) ToStringPtrOutputWithContext added in v0.15.0

func (e MetricMetricSource) ToStringPtrOutputWithContext(ctx context.Context) pulumi.StringPtrOutput

type MetricMetricSourceInput added in v0.15.0

type MetricMetricSourceInput interface {
	pulumi.Input

	ToMetricMetricSourceOutput() MetricMetricSourceOutput
	ToMetricMetricSourceOutputWithContext(context.Context) MetricMetricSourceOutput
}

MetricMetricSourceInput is an input type that accepts MetricMetricSourceArgs and MetricMetricSourceOutput values. You can construct a concrete instance of `MetricMetricSourceInput` via:

MetricMetricSourceArgs{...}

type MetricMetricSourceOutput added in v0.15.0

type MetricMetricSourceOutput struct{ *pulumi.OutputState }

func (MetricMetricSourceOutput) ElementType added in v0.15.0

func (MetricMetricSourceOutput) ElementType() reflect.Type

func (MetricMetricSourceOutput) ToMetricMetricSourceOutput added in v0.15.0

func (o MetricMetricSourceOutput) ToMetricMetricSourceOutput() MetricMetricSourceOutput

func (MetricMetricSourceOutput) ToMetricMetricSourceOutputWithContext added in v0.15.0

func (o MetricMetricSourceOutput) ToMetricMetricSourceOutputWithContext(ctx context.Context) MetricMetricSourceOutput

func (MetricMetricSourceOutput) ToMetricMetricSourcePtrOutput added in v0.15.0

func (o MetricMetricSourceOutput) ToMetricMetricSourcePtrOutput() MetricMetricSourcePtrOutput

func (MetricMetricSourceOutput) ToMetricMetricSourcePtrOutputWithContext added in v0.15.0

func (o MetricMetricSourceOutput) ToMetricMetricSourcePtrOutputWithContext(ctx context.Context) MetricMetricSourcePtrOutput

func (MetricMetricSourceOutput) ToStringOutput added in v0.15.0

func (o MetricMetricSourceOutput) ToStringOutput() pulumi.StringOutput

func (MetricMetricSourceOutput) ToStringOutputWithContext added in v0.15.0

func (o MetricMetricSourceOutput) ToStringOutputWithContext(ctx context.Context) pulumi.StringOutput

func (MetricMetricSourceOutput) ToStringPtrOutput added in v0.15.0

func (o MetricMetricSourceOutput) ToStringPtrOutput() pulumi.StringPtrOutput

func (MetricMetricSourceOutput) ToStringPtrOutputWithContext added in v0.15.0

func (o MetricMetricSourceOutput) ToStringPtrOutputWithContext(ctx context.Context) pulumi.StringPtrOutput

type MetricMetricSourcePtrInput added in v0.15.0

type MetricMetricSourcePtrInput interface {
	pulumi.Input

	ToMetricMetricSourcePtrOutput() MetricMetricSourcePtrOutput
	ToMetricMetricSourcePtrOutputWithContext(context.Context) MetricMetricSourcePtrOutput
}

func MetricMetricSourcePtr added in v0.15.0

func MetricMetricSourcePtr(v string) MetricMetricSourcePtrInput

type MetricMetricSourcePtrOutput added in v0.15.0

type MetricMetricSourcePtrOutput struct{ *pulumi.OutputState }

func (MetricMetricSourcePtrOutput) Elem added in v0.15.0

func (MetricMetricSourcePtrOutput) ElementType added in v0.15.0

func (MetricMetricSourcePtrOutput) ToMetricMetricSourcePtrOutput added in v0.15.0

func (o MetricMetricSourcePtrOutput) ToMetricMetricSourcePtrOutput() MetricMetricSourcePtrOutput

func (MetricMetricSourcePtrOutput) ToMetricMetricSourcePtrOutputWithContext added in v0.15.0

func (o MetricMetricSourcePtrOutput) ToMetricMetricSourcePtrOutputWithContext(ctx context.Context) MetricMetricSourcePtrOutput

func (MetricMetricSourcePtrOutput) ToStringPtrOutput added in v0.15.0

func (o MetricMetricSourcePtrOutput) ToStringPtrOutput() pulumi.StringPtrOutput

func (MetricMetricSourcePtrOutput) ToStringPtrOutputWithContext added in v0.15.0

func (o MetricMetricSourcePtrOutput) ToStringPtrOutputWithContext(ctx context.Context) pulumi.StringPtrOutput

type MetricOutput added in v0.15.0

type MetricOutput struct{ *pulumi.OutputState }

A Dataproc custom metric.

func (MetricOutput) ElementType added in v0.15.0

func (MetricOutput) ElementType() reflect.Type

func (MetricOutput) MetricOverrides added in v0.15.0

func (o MetricOutput) MetricOverrides() pulumi.StringArrayOutput

Optional. Specify one or more Custom metrics (https://cloud.google.com/dataproc/docs/guides/dataproc-metrics#custom_metrics) to collect for the metric source (for the SPARK metric source, any Spark metric (https://spark.apache.org/docs/latest/monitoring.html#metrics) can be specified).Provide metrics in the following format: METRIC_SOURCE:INSTANCE:GROUP:METRIC Use camelcase as appropriate.Examples: yarn:ResourceManager:QueueMetrics:AppsCompleted spark:driver:DAGScheduler:job.allJobs sparkHistoryServer:JVM:Memory:NonHeapMemoryUsage.committed hiveserver2:JVM:Memory:NonHeapMemoryUsage.used Notes: Only the specified overridden metrics are collected for the metric source. For example, if one or more spark:executor metrics are listed as metric overrides, other SPARK metrics are not collected. The collection of the metrics for other enabled custom metric sources is unaffected. For example, if both SPARK and YARN metric sources are enabled, and overrides are provided for Spark metrics only, all YARN metrics are collected.

func (MetricOutput) MetricSource added in v0.15.0

func (o MetricOutput) MetricSource() MetricMetricSourceOutput

A standard set of metrics is collected unless metricOverrides are specified for the metric source (see Custom metrics (https://cloud.google.com/dataproc/docs/guides/dataproc-metrics#custom_metrics) for more information).

func (MetricOutput) ToMetricOutput added in v0.15.0

func (o MetricOutput) ToMetricOutput() MetricOutput

func (MetricOutput) ToMetricOutputWithContext added in v0.15.0

func (o MetricOutput) ToMetricOutputWithContext(ctx context.Context) MetricOutput

type MetricResponse added in v0.15.0

type MetricResponse struct {
	// Optional. Specify one or more Custom metrics (https://cloud.google.com/dataproc/docs/guides/dataproc-metrics#custom_metrics) to collect for the metric source (for the SPARK metric source, any Spark metric (https://spark.apache.org/docs/latest/monitoring.html#metrics) can be specified).Provide metrics in the following format: METRIC_SOURCE:INSTANCE:GROUP:METRIC Use camelcase as appropriate.Examples: yarn:ResourceManager:QueueMetrics:AppsCompleted spark:driver:DAGScheduler:job.allJobs sparkHistoryServer:JVM:Memory:NonHeapMemoryUsage.committed hiveserver2:JVM:Memory:NonHeapMemoryUsage.used Notes: Only the specified overridden metrics are collected for the metric source. For example, if one or more spark:executor metrics are listed as metric overrides, other SPARK metrics are not collected. The collection of the metrics for other enabled custom metric sources is unaffected. For example, if both SPARK and YARN metric sources are enabled, and overrides are provided for Spark metrics only, all YARN metrics are collected.
	MetricOverrides []string `pulumi:"metricOverrides"`
	// A standard set of metrics is collected unless metricOverrides are specified for the metric source (see Custom metrics (https://cloud.google.com/dataproc/docs/guides/dataproc-metrics#custom_metrics) for more information).
	MetricSource string `pulumi:"metricSource"`
}

A Dataproc custom metric.

type MetricResponseArrayOutput added in v0.15.0

type MetricResponseArrayOutput struct{ *pulumi.OutputState }

func (MetricResponseArrayOutput) ElementType added in v0.15.0

func (MetricResponseArrayOutput) ElementType() reflect.Type

func (MetricResponseArrayOutput) Index added in v0.15.0

func (MetricResponseArrayOutput) ToMetricResponseArrayOutput added in v0.15.0

func (o MetricResponseArrayOutput) ToMetricResponseArrayOutput() MetricResponseArrayOutput

func (MetricResponseArrayOutput) ToMetricResponseArrayOutputWithContext added in v0.15.0

func (o MetricResponseArrayOutput) ToMetricResponseArrayOutputWithContext(ctx context.Context) MetricResponseArrayOutput

type MetricResponseOutput added in v0.15.0

type MetricResponseOutput struct{ *pulumi.OutputState }

A Dataproc custom metric.

func (MetricResponseOutput) ElementType added in v0.15.0

func (MetricResponseOutput) ElementType() reflect.Type

func (MetricResponseOutput) MetricOverrides added in v0.15.0

func (o MetricResponseOutput) MetricOverrides() pulumi.StringArrayOutput

Optional. Specify one or more Custom metrics (https://cloud.google.com/dataproc/docs/guides/dataproc-metrics#custom_metrics) to collect for the metric source (for the SPARK metric source, any Spark metric (https://spark.apache.org/docs/latest/monitoring.html#metrics) can be specified).Provide metrics in the following format: METRIC_SOURCE:INSTANCE:GROUP:METRIC Use camelcase as appropriate.Examples: yarn:ResourceManager:QueueMetrics:AppsCompleted spark:driver:DAGScheduler:job.allJobs sparkHistoryServer:JVM:Memory:NonHeapMemoryUsage.committed hiveserver2:JVM:Memory:NonHeapMemoryUsage.used Notes: Only the specified overridden metrics are collected for the metric source. For example, if one or more spark:executor metrics are listed as metric overrides, other SPARK metrics are not collected. The collection of the metrics for other enabled custom metric sources is unaffected. For example, if both SPARK and YARN metric sources are enabled, and overrides are provided for Spark metrics only, all YARN metrics are collected.

func (MetricResponseOutput) MetricSource added in v0.15.0

func (o MetricResponseOutput) MetricSource() pulumi.StringOutput

A standard set of metrics is collected unless metricOverrides are specified for the metric source (see Custom metrics (https://cloud.google.com/dataproc/docs/guides/dataproc-metrics#custom_metrics) for more information).

func (MetricResponseOutput) ToMetricResponseOutput added in v0.15.0

func (o MetricResponseOutput) ToMetricResponseOutput() MetricResponseOutput

func (MetricResponseOutput) ToMetricResponseOutputWithContext added in v0.15.0

func (o MetricResponseOutput) ToMetricResponseOutputWithContext(ctx context.Context) MetricResponseOutput

type NamespacedGkeDeploymentTarget

type NamespacedGkeDeploymentTarget struct {
	// Optional. A namespace within the GKE cluster to deploy into.
	ClusterNamespace *string `pulumi:"clusterNamespace"`
	// Optional. The target GKE cluster to deploy to. Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'
	TargetGkeCluster *string `pulumi:"targetGkeCluster"`
}

Deprecated. Used only for the deprecated beta. A full, namespace-isolated deployment target for an existing GKE cluster.

type NamespacedGkeDeploymentTargetArgs

type NamespacedGkeDeploymentTargetArgs struct {
	// Optional. A namespace within the GKE cluster to deploy into.
	ClusterNamespace pulumi.StringPtrInput `pulumi:"clusterNamespace"`
	// Optional. The target GKE cluster to deploy to. Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'
	TargetGkeCluster pulumi.StringPtrInput `pulumi:"targetGkeCluster"`
}

Deprecated. Used only for the deprecated beta. A full, namespace-isolated deployment target for an existing GKE cluster.

func (NamespacedGkeDeploymentTargetArgs) ElementType

func (NamespacedGkeDeploymentTargetArgs) ToNamespacedGkeDeploymentTargetOutput

func (i NamespacedGkeDeploymentTargetArgs) ToNamespacedGkeDeploymentTargetOutput() NamespacedGkeDeploymentTargetOutput

func (NamespacedGkeDeploymentTargetArgs) ToNamespacedGkeDeploymentTargetOutputWithContext

func (i NamespacedGkeDeploymentTargetArgs) ToNamespacedGkeDeploymentTargetOutputWithContext(ctx context.Context) NamespacedGkeDeploymentTargetOutput

func (NamespacedGkeDeploymentTargetArgs) ToNamespacedGkeDeploymentTargetPtrOutput

func (i NamespacedGkeDeploymentTargetArgs) ToNamespacedGkeDeploymentTargetPtrOutput() NamespacedGkeDeploymentTargetPtrOutput

func (NamespacedGkeDeploymentTargetArgs) ToNamespacedGkeDeploymentTargetPtrOutputWithContext

func (i NamespacedGkeDeploymentTargetArgs) ToNamespacedGkeDeploymentTargetPtrOutputWithContext(ctx context.Context) NamespacedGkeDeploymentTargetPtrOutput

type NamespacedGkeDeploymentTargetInput

type NamespacedGkeDeploymentTargetInput interface {
	pulumi.Input

	ToNamespacedGkeDeploymentTargetOutput() NamespacedGkeDeploymentTargetOutput
	ToNamespacedGkeDeploymentTargetOutputWithContext(context.Context) NamespacedGkeDeploymentTargetOutput
}

NamespacedGkeDeploymentTargetInput is an input type that accepts NamespacedGkeDeploymentTargetArgs and NamespacedGkeDeploymentTargetOutput values. You can construct a concrete instance of `NamespacedGkeDeploymentTargetInput` via:

NamespacedGkeDeploymentTargetArgs{...}

type NamespacedGkeDeploymentTargetOutput

type NamespacedGkeDeploymentTargetOutput struct{ *pulumi.OutputState }

Deprecated. Used only for the deprecated beta. A full, namespace-isolated deployment target for an existing GKE cluster.

func (NamespacedGkeDeploymentTargetOutput) ClusterNamespace

Optional. A namespace within the GKE cluster to deploy into.

func (NamespacedGkeDeploymentTargetOutput) ElementType

func (NamespacedGkeDeploymentTargetOutput) TargetGkeCluster

Optional. The target GKE cluster to deploy to. Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'

func (NamespacedGkeDeploymentTargetOutput) ToNamespacedGkeDeploymentTargetOutput

func (o NamespacedGkeDeploymentTargetOutput) ToNamespacedGkeDeploymentTargetOutput() NamespacedGkeDeploymentTargetOutput

func (NamespacedGkeDeploymentTargetOutput) ToNamespacedGkeDeploymentTargetOutputWithContext

func (o NamespacedGkeDeploymentTargetOutput) ToNamespacedGkeDeploymentTargetOutputWithContext(ctx context.Context) NamespacedGkeDeploymentTargetOutput

func (NamespacedGkeDeploymentTargetOutput) ToNamespacedGkeDeploymentTargetPtrOutput

func (o NamespacedGkeDeploymentTargetOutput) ToNamespacedGkeDeploymentTargetPtrOutput() NamespacedGkeDeploymentTargetPtrOutput

func (NamespacedGkeDeploymentTargetOutput) ToNamespacedGkeDeploymentTargetPtrOutputWithContext

func (o NamespacedGkeDeploymentTargetOutput) ToNamespacedGkeDeploymentTargetPtrOutputWithContext(ctx context.Context) NamespacedGkeDeploymentTargetPtrOutput

type NamespacedGkeDeploymentTargetPtrInput

type NamespacedGkeDeploymentTargetPtrInput interface {
	pulumi.Input

	ToNamespacedGkeDeploymentTargetPtrOutput() NamespacedGkeDeploymentTargetPtrOutput
	ToNamespacedGkeDeploymentTargetPtrOutputWithContext(context.Context) NamespacedGkeDeploymentTargetPtrOutput
}

NamespacedGkeDeploymentTargetPtrInput is an input type that accepts NamespacedGkeDeploymentTargetArgs, NamespacedGkeDeploymentTargetPtr and NamespacedGkeDeploymentTargetPtrOutput values. You can construct a concrete instance of `NamespacedGkeDeploymentTargetPtrInput` via:

        NamespacedGkeDeploymentTargetArgs{...}

or:

        nil

type NamespacedGkeDeploymentTargetPtrOutput

type NamespacedGkeDeploymentTargetPtrOutput struct{ *pulumi.OutputState }

func (NamespacedGkeDeploymentTargetPtrOutput) ClusterNamespace

Optional. A namespace within the GKE cluster to deploy into.

func (NamespacedGkeDeploymentTargetPtrOutput) Elem

func (NamespacedGkeDeploymentTargetPtrOutput) ElementType

func (NamespacedGkeDeploymentTargetPtrOutput) TargetGkeCluster

Optional. The target GKE cluster to deploy to. Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'

func (NamespacedGkeDeploymentTargetPtrOutput) ToNamespacedGkeDeploymentTargetPtrOutput

func (o NamespacedGkeDeploymentTargetPtrOutput) ToNamespacedGkeDeploymentTargetPtrOutput() NamespacedGkeDeploymentTargetPtrOutput

func (NamespacedGkeDeploymentTargetPtrOutput) ToNamespacedGkeDeploymentTargetPtrOutputWithContext

func (o NamespacedGkeDeploymentTargetPtrOutput) ToNamespacedGkeDeploymentTargetPtrOutputWithContext(ctx context.Context) NamespacedGkeDeploymentTargetPtrOutput

type NamespacedGkeDeploymentTargetResponse

type NamespacedGkeDeploymentTargetResponse struct {
	// Optional. A namespace within the GKE cluster to deploy into.
	ClusterNamespace string `pulumi:"clusterNamespace"`
	// Optional. The target GKE cluster to deploy to. Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'
	TargetGkeCluster string `pulumi:"targetGkeCluster"`
}

Deprecated. Used only for the deprecated beta. A full, namespace-isolated deployment target for an existing GKE cluster.

type NamespacedGkeDeploymentTargetResponseOutput

type NamespacedGkeDeploymentTargetResponseOutput struct{ *pulumi.OutputState }

Deprecated. Used only for the deprecated beta. A full, namespace-isolated deployment target for an existing GKE cluster.

func (NamespacedGkeDeploymentTargetResponseOutput) ClusterNamespace

Optional. A namespace within the GKE cluster to deploy into.

func (NamespacedGkeDeploymentTargetResponseOutput) ElementType

func (NamespacedGkeDeploymentTargetResponseOutput) TargetGkeCluster

Optional. The target GKE cluster to deploy to. Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'

func (NamespacedGkeDeploymentTargetResponseOutput) ToNamespacedGkeDeploymentTargetResponseOutput

func (o NamespacedGkeDeploymentTargetResponseOutput) ToNamespacedGkeDeploymentTargetResponseOutput() NamespacedGkeDeploymentTargetResponseOutput

func (NamespacedGkeDeploymentTargetResponseOutput) ToNamespacedGkeDeploymentTargetResponseOutputWithContext

func (o NamespacedGkeDeploymentTargetResponseOutput) ToNamespacedGkeDeploymentTargetResponseOutputWithContext(ctx context.Context) NamespacedGkeDeploymentTargetResponseOutput

type NodeGroup added in v0.28.0

type NodeGroup struct {
	pulumi.CustomResourceState

	ClusterId pulumi.StringOutput `pulumi:"clusterId"`
	// Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labels.
	Labels pulumi.StringMapOutput `pulumi:"labels"`
	// The Node group resource name (https://aip.dev/122).
	Name pulumi.StringOutput `pulumi:"name"`
	// Optional. The node group instance group configuration.
	NodeGroupConfig InstanceGroupConfigResponseOutput `pulumi:"nodeGroupConfig"`
	// Optional. An optional node group ID. Generated if not specified.The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of from 3 to 33 characters.
	NodeGroupId pulumi.StringPtrOutput `pulumi:"nodeGroupId"`
	// Optional. Operation ID of the parent operation sending the create request.
	ParentOperationId pulumi.StringPtrOutput `pulumi:"parentOperationId"`
	Project           pulumi.StringOutput    `pulumi:"project"`
	RegionId          pulumi.StringOutput    `pulumi:"regionId"`
	// Optional. A unique ID used to identify the request. If the server receives two CreateNodeGroupRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateNodeGroupRequests) with the same ID, the second request is ignored and the first google.longrunning.Operation created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.
	RequestId pulumi.StringPtrOutput `pulumi:"requestId"`
	// Node group roles.
	Roles pulumi.StringArrayOutput `pulumi:"roles"`
}

Creates a node group in a cluster. The returned Operation.metadata is NodeGroupOperationMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#nodegroupoperationmetadata). Auto-naming is currently not supported for this resource. Note - this resource's API doesn't support deletion. When deleted, the resource will persist on Google Cloud even though it will be deleted from Pulumi state.

func GetNodeGroup added in v0.28.0

func GetNodeGroup(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *NodeGroupState, opts ...pulumi.ResourceOption) (*NodeGroup, error)

GetNodeGroup gets an existing NodeGroup resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewNodeGroup added in v0.28.0

func NewNodeGroup(ctx *pulumi.Context,
	name string, args *NodeGroupArgs, opts ...pulumi.ResourceOption) (*NodeGroup, error)

NewNodeGroup registers a new resource with the given unique name, arguments, and options.
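
A hedged sketch of creating a NodeGroup inside pulumi.Run (same assumed "dataproc" alias; the cluster, region, the "DRIVER" role value, and the concrete NodeGroupRolesItemArray type name are assumptions and placeholders):

	nodeGroup, err := dataproc.NewNodeGroup(ctx, "driver-group", &dataproc.NodeGroupArgs{
		ClusterId: pulumi.String("my-cluster"),
		RegionId:  pulumi.String("us-central1"),
		// NodeGroupRolesItem is a string-backed enum, so a literal role name can be cast directly.
		Roles: dataproc.NodeGroupRolesItemArray{
			dataproc.NodeGroupRolesItem("DRIVER"),
		},
	})
	if err != nil {
		return err
	}
	ctx.Export("nodeGroupName", nodeGroup.Name)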

func (*NodeGroup) ElementType added in v0.28.0

func (*NodeGroup) ElementType() reflect.Type

func (*NodeGroup) ToNodeGroupOutput added in v0.28.0

func (i *NodeGroup) ToNodeGroupOutput() NodeGroupOutput

func (*NodeGroup) ToNodeGroupOutputWithContext added in v0.28.0

func (i *NodeGroup) ToNodeGroupOutputWithContext(ctx context.Context) NodeGroupOutput

type NodeGroupAffinity

type NodeGroupAffinity struct {
	// The URI of a sole-tenant node group resource (https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups) that the cluster will be created on.A full URL, partial URI, or node group name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/nodeGroups/node-group-1 projects/[project_id]/zones/[zone]/nodeGroups/node-group-1 node-group-1
	NodeGroupUri string `pulumi:"nodeGroupUri"`
}

Node Group Affinity for clusters using sole-tenant node groups. The Dataproc NodeGroupAffinity resource is not related to the Dataproc NodeGroup resource.

type NodeGroupAffinityArgs

type NodeGroupAffinityArgs struct {
	// The URI of a sole-tenant node group resource (https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups) that the cluster will be created on.A full URL, partial URI, or node group name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/nodeGroups/node-group-1 projects/[project_id]/zones/[zone]/nodeGroups/node-group-1 node-group-1
	NodeGroupUri pulumi.StringInput `pulumi:"nodeGroupUri"`
}

Node Group Affinity for clusters using sole-tenant node groups. The Dataproc NodeGroupAffinity resource is not related to the Dataproc NodeGroup resource.
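
A brief sketch (same assumed "dataproc" alias; project, zone, and node group name are placeholders) of pointing a cluster at a sole-tenant node group via NodeGroupAffinityArgs:

	soleTenant := dataproc.NodeGroupAffinityArgs{
		// Any of the full URL, partial URI, or bare node-group-name forms described above is accepted.
		NodeGroupUri: pulumi.String("projects/my-project/zones/us-central1-a/nodeGroups/node-group-1"),
	}
	_ = soleTenant // typically set on the GCE cluster config's node group affinity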

func (NodeGroupAffinityArgs) ElementType

func (NodeGroupAffinityArgs) ElementType() reflect.Type

func (NodeGroupAffinityArgs) ToNodeGroupAffinityOutput

func (i NodeGroupAffinityArgs) ToNodeGroupAffinityOutput() NodeGroupAffinityOutput

func (NodeGroupAffinityArgs) ToNodeGroupAffinityOutputWithContext

func (i NodeGroupAffinityArgs) ToNodeGroupAffinityOutputWithContext(ctx context.Context) NodeGroupAffinityOutput

func (NodeGroupAffinityArgs) ToNodeGroupAffinityPtrOutput

func (i NodeGroupAffinityArgs) ToNodeGroupAffinityPtrOutput() NodeGroupAffinityPtrOutput

func (NodeGroupAffinityArgs) ToNodeGroupAffinityPtrOutputWithContext

func (i NodeGroupAffinityArgs) ToNodeGroupAffinityPtrOutputWithContext(ctx context.Context) NodeGroupAffinityPtrOutput

type NodeGroupAffinityInput

type NodeGroupAffinityInput interface {
	pulumi.Input

	ToNodeGroupAffinityOutput() NodeGroupAffinityOutput
	ToNodeGroupAffinityOutputWithContext(context.Context) NodeGroupAffinityOutput
}

NodeGroupAffinityInput is an input type that accepts NodeGroupAffinityArgs and NodeGroupAffinityOutput values. You can construct a concrete instance of `NodeGroupAffinityInput` via:

NodeGroupAffinityArgs{...}

type NodeGroupAffinityOutput

type NodeGroupAffinityOutput struct{ *pulumi.OutputState }

Node Group Affinity for clusters using sole-tenant node groups. The Dataproc NodeGroupAffinity resource is not related to the Dataproc NodeGroup resource.

func (NodeGroupAffinityOutput) ElementType

func (NodeGroupAffinityOutput) ElementType() reflect.Type

func (NodeGroupAffinityOutput) NodeGroupUri

func (o NodeGroupAffinityOutput) NodeGroupUri() pulumi.StringOutput

The URI of a sole-tenant node group resource (https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups) that the cluster will be created on.A full URL, partial URI, or node group name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/nodeGroups/node-group-1 projects/[project_id]/zones/[zone]/nodeGroups/node-group-1 node-group-1

func (NodeGroupAffinityOutput) ToNodeGroupAffinityOutput

func (o NodeGroupAffinityOutput) ToNodeGroupAffinityOutput() NodeGroupAffinityOutput

func (NodeGroupAffinityOutput) ToNodeGroupAffinityOutputWithContext

func (o NodeGroupAffinityOutput) ToNodeGroupAffinityOutputWithContext(ctx context.Context) NodeGroupAffinityOutput

func (NodeGroupAffinityOutput) ToNodeGroupAffinityPtrOutput

func (o NodeGroupAffinityOutput) ToNodeGroupAffinityPtrOutput() NodeGroupAffinityPtrOutput

func (NodeGroupAffinityOutput) ToNodeGroupAffinityPtrOutputWithContext

func (o NodeGroupAffinityOutput) ToNodeGroupAffinityPtrOutputWithContext(ctx context.Context) NodeGroupAffinityPtrOutput

type NodeGroupAffinityPtrInput

type NodeGroupAffinityPtrInput interface {
	pulumi.Input

	ToNodeGroupAffinityPtrOutput() NodeGroupAffinityPtrOutput
	ToNodeGroupAffinityPtrOutputWithContext(context.Context) NodeGroupAffinityPtrOutput
}

NodeGroupAffinityPtrInput is an input type that accepts NodeGroupAffinityArgs, NodeGroupAffinityPtr and NodeGroupAffinityPtrOutput values. You can construct a concrete instance of `NodeGroupAffinityPtrInput` via:

        NodeGroupAffinityArgs{...}

or:

        nil

type NodeGroupAffinityPtrOutput

type NodeGroupAffinityPtrOutput struct{ *pulumi.OutputState }

func (NodeGroupAffinityPtrOutput) Elem

func (NodeGroupAffinityPtrOutput) ElementType

func (NodeGroupAffinityPtrOutput) ElementType() reflect.Type

func (NodeGroupAffinityPtrOutput) NodeGroupUri

The URI of a sole-tenant node group resource (https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups) that the cluster will be created on.A full URL, partial URI, or node group name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/nodeGroups/node-group-1 projects/[project_id]/zones/[zone]/nodeGroups/node-group-1 node-group-1

func (NodeGroupAffinityPtrOutput) ToNodeGroupAffinityPtrOutput

func (o NodeGroupAffinityPtrOutput) ToNodeGroupAffinityPtrOutput() NodeGroupAffinityPtrOutput

func (NodeGroupAffinityPtrOutput) ToNodeGroupAffinityPtrOutputWithContext

func (o NodeGroupAffinityPtrOutput) ToNodeGroupAffinityPtrOutputWithContext(ctx context.Context) NodeGroupAffinityPtrOutput

type NodeGroupAffinityResponse

type NodeGroupAffinityResponse struct {
	// The URI of a sole-tenant node group resource (https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups) that the cluster will be created on.A full URL, partial URI, or node group name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/nodeGroups/node-group-1 projects/[project_id]/zones/[zone]/nodeGroups/node-group-1 node-group-1
	NodeGroupUri string `pulumi:"nodeGroupUri"`
}

Node Group Affinity for clusters using sole-tenant node groups. The Dataproc NodeGroupAffinity resource is not related to the Dataproc NodeGroup resource.

type NodeGroupAffinityResponseOutput

type NodeGroupAffinityResponseOutput struct{ *pulumi.OutputState }

Node Group Affinity for clusters using sole-tenant node groups. The Dataproc NodeGroupAffinity resource is not related to the Dataproc NodeGroup resource.

func (NodeGroupAffinityResponseOutput) ElementType

func (NodeGroupAffinityResponseOutput) NodeGroupUri

The URI of a sole-tenant node group resource (https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups) that the cluster will be created on. A full URL, partial URI, or node group name is valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/nodeGroups/node-group-1 projects/[project_id]/zones/[zone]/nodeGroups/node-group-1 node-group-1

func (NodeGroupAffinityResponseOutput) ToNodeGroupAffinityResponseOutput

func (o NodeGroupAffinityResponseOutput) ToNodeGroupAffinityResponseOutput() NodeGroupAffinityResponseOutput

func (NodeGroupAffinityResponseOutput) ToNodeGroupAffinityResponseOutputWithContext

func (o NodeGroupAffinityResponseOutput) ToNodeGroupAffinityResponseOutputWithContext(ctx context.Context) NodeGroupAffinityResponseOutput

type NodeGroupArgs added in v0.28.0

type NodeGroupArgs struct {
	ClusterId pulumi.StringInput
	// Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labels.
	Labels pulumi.StringMapInput
	// The Node group resource name (https://aip.dev/122).
	Name pulumi.StringPtrInput
	// Optional. The node group instance group configuration.
	NodeGroupConfig InstanceGroupConfigPtrInput
	// Optional. A node group ID. Generated if not specified. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of from 3 to 33 characters.
	NodeGroupId pulumi.StringPtrInput
	// Optional. Operation ID of the parent operation sending the create request.
	ParentOperationId pulumi.StringPtrInput
	Project           pulumi.StringPtrInput
	RegionId          pulumi.StringInput
	// Optional. A unique ID used to identify the request. If the server receives two CreateNodeGroupRequests (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateNodeGroupRequests) with the same ID, the second request is ignored and the first google.longrunning.Operation created and stored in the backend is returned. Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier). The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.
	RequestId pulumi.StringPtrInput
	// Node group roles.
	Roles NodeGroupRolesItemArrayInput
}

The set of arguments for constructing a NodeGroup resource.
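
As a rough end-to-end sketch (not part of the generated docs), the program below registers a NodeGroup against an existing cluster. The import path, the NewNodeGroup constructor name, and the "DRIVER" role string follow the usual conventions for this provider but are assumptions here; the project, region, and cluster values are placeholders.

	package main

	import (
		dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1" // assumed import path
		"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
	)

	func main() {
		pulumi.Run(func(ctx *pulumi.Context) error {
			// Assumed constructor name and role value; IDs below are placeholders.
			_, err := dataproc.NewNodeGroup(ctx, "driver-pool", &dataproc.NodeGroupArgs{
				ClusterId:   pulumi.String("my-cluster"),
				RegionId:    pulumi.String("us-central1"),
				NodeGroupId: pulumi.String("driver-pool-1"),
				Roles: dataproc.NodeGroupRolesItemArray{
					dataproc.NodeGroupRolesItem("DRIVER"),
				},
				Labels: pulumi.StringMap{"team": pulumi.String("analytics")},
			})
			return err
		})
	}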

func (NodeGroupArgs) ElementType added in v0.28.0

func (NodeGroupArgs) ElementType() reflect.Type

type NodeGroupInput added in v0.28.0

type NodeGroupInput interface {
	pulumi.Input

	ToNodeGroupOutput() NodeGroupOutput
	ToNodeGroupOutputWithContext(ctx context.Context) NodeGroupOutput
}

type NodeGroupOutput added in v0.28.0

type NodeGroupOutput struct{ *pulumi.OutputState }

func (NodeGroupOutput) ClusterId added in v0.28.0

func (o NodeGroupOutput) ClusterId() pulumi.StringOutput

func (NodeGroupOutput) ElementType added in v0.28.0

func (NodeGroupOutput) ElementType() reflect.Type

func (NodeGroupOutput) Labels added in v0.28.0

Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labels.

func (NodeGroupOutput) Name added in v0.28.0

The Node group resource name (https://aip.dev/122).

func (NodeGroupOutput) NodeGroupConfig added in v0.28.0

Optional. The node group instance group configuration.

func (NodeGroupOutput) NodeGroupId added in v0.28.0

func (o NodeGroupOutput) NodeGroupId() pulumi.StringPtrOutput

Optional. A node group ID. Generated if not specified. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of from 3 to 33 characters.

func (NodeGroupOutput) ParentOperationId added in v0.32.0

func (o NodeGroupOutput) ParentOperationId() pulumi.StringPtrOutput

Optional. Operation ID of the parent operation sending the create request.

func (NodeGroupOutput) Project added in v0.28.0

func (o NodeGroupOutput) Project() pulumi.StringOutput

func (NodeGroupOutput) RegionId added in v0.28.0

func (o NodeGroupOutput) RegionId() pulumi.StringOutput

func (NodeGroupOutput) RequestId added in v0.28.0

func (o NodeGroupOutput) RequestId() pulumi.StringPtrOutput

Optional. A unique ID used to identify the request. If the server receives two CreateNodeGroupRequests (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateNodeGroupRequests) with the same ID, the second request is ignored and the first google.longrunning.Operation created and stored in the backend is returned. Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier). The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.

func (NodeGroupOutput) Roles added in v0.28.0

Node group roles.

func (NodeGroupOutput) ToNodeGroupOutput added in v0.28.0

func (o NodeGroupOutput) ToNodeGroupOutput() NodeGroupOutput

func (NodeGroupOutput) ToNodeGroupOutputWithContext added in v0.28.0

func (o NodeGroupOutput) ToNodeGroupOutputWithContext(ctx context.Context) NodeGroupOutput

type NodeGroupResponse added in v0.28.0

type NodeGroupResponse struct {
	// Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labels.
	Labels map[string]string `pulumi:"labels"`
	// The Node group resource name (https://aip.dev/122).
	Name string `pulumi:"name"`
	// Optional. The node group instance group configuration.
	NodeGroupConfig InstanceGroupConfigResponse `pulumi:"nodeGroupConfig"`
	// Node group roles.
	Roles []string `pulumi:"roles"`
}

Dataproc Node Group. The Dataproc NodeGroup resource is not related to the Dataproc NodeGroupAffinity resource.

type NodeGroupResponseOutput added in v0.28.0

type NodeGroupResponseOutput struct{ *pulumi.OutputState }

Dataproc Node Group. The Dataproc NodeGroup resource is not related to the Dataproc NodeGroupAffinity resource.

func (NodeGroupResponseOutput) ElementType added in v0.28.0

func (NodeGroupResponseOutput) ElementType() reflect.Type

func (NodeGroupResponseOutput) Labels added in v0.28.0

Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labels.

func (NodeGroupResponseOutput) Name added in v0.28.0

The Node group resource name (https://aip.dev/122).

func (NodeGroupResponseOutput) NodeGroupConfig added in v0.28.0

Optional. The node group instance group configuration.

func (NodeGroupResponseOutput) Roles added in v0.28.0

Node group roles.

func (NodeGroupResponseOutput) ToNodeGroupResponseOutput added in v0.28.0

func (o NodeGroupResponseOutput) ToNodeGroupResponseOutput() NodeGroupResponseOutput

func (NodeGroupResponseOutput) ToNodeGroupResponseOutputWithContext added in v0.28.0

func (o NodeGroupResponseOutput) ToNodeGroupResponseOutputWithContext(ctx context.Context) NodeGroupResponseOutput

type NodeGroupRolesItem added in v0.28.0

type NodeGroupRolesItem string

func (NodeGroupRolesItem) ElementType added in v0.28.0

func (NodeGroupRolesItem) ElementType() reflect.Type

func (NodeGroupRolesItem) ToNodeGroupRolesItemOutput added in v0.28.0

func (e NodeGroupRolesItem) ToNodeGroupRolesItemOutput() NodeGroupRolesItemOutput

func (NodeGroupRolesItem) ToNodeGroupRolesItemOutputWithContext added in v0.28.0

func (e NodeGroupRolesItem) ToNodeGroupRolesItemOutputWithContext(ctx context.Context) NodeGroupRolesItemOutput

func (NodeGroupRolesItem) ToNodeGroupRolesItemPtrOutput added in v0.28.0

func (e NodeGroupRolesItem) ToNodeGroupRolesItemPtrOutput() NodeGroupRolesItemPtrOutput

func (NodeGroupRolesItem) ToNodeGroupRolesItemPtrOutputWithContext added in v0.28.0

func (e NodeGroupRolesItem) ToNodeGroupRolesItemPtrOutputWithContext(ctx context.Context) NodeGroupRolesItemPtrOutput

func (NodeGroupRolesItem) ToStringOutput added in v0.28.0

func (e NodeGroupRolesItem) ToStringOutput() pulumi.StringOutput

func (NodeGroupRolesItem) ToStringOutputWithContext added in v0.28.0

func (e NodeGroupRolesItem) ToStringOutputWithContext(ctx context.Context) pulumi.StringOutput

func (NodeGroupRolesItem) ToStringPtrOutput added in v0.28.0

func (e NodeGroupRolesItem) ToStringPtrOutput() pulumi.StringPtrOutput

func (NodeGroupRolesItem) ToStringPtrOutputWithContext added in v0.28.0

func (e NodeGroupRolesItem) ToStringPtrOutputWithContext(ctx context.Context) pulumi.StringPtrOutput

type NodeGroupRolesItemArray added in v0.28.0

type NodeGroupRolesItemArray []NodeGroupRolesItem

func (NodeGroupRolesItemArray) ElementType added in v0.28.0

func (NodeGroupRolesItemArray) ElementType() reflect.Type

func (NodeGroupRolesItemArray) ToNodeGroupRolesItemArrayOutput added in v0.28.0

func (i NodeGroupRolesItemArray) ToNodeGroupRolesItemArrayOutput() NodeGroupRolesItemArrayOutput

func (NodeGroupRolesItemArray) ToNodeGroupRolesItemArrayOutputWithContext added in v0.28.0

func (i NodeGroupRolesItemArray) ToNodeGroupRolesItemArrayOutputWithContext(ctx context.Context) NodeGroupRolesItemArrayOutput

type NodeGroupRolesItemArrayInput added in v0.28.0

type NodeGroupRolesItemArrayInput interface {
	pulumi.Input

	ToNodeGroupRolesItemArrayOutput() NodeGroupRolesItemArrayOutput
	ToNodeGroupRolesItemArrayOutputWithContext(context.Context) NodeGroupRolesItemArrayOutput
}

NodeGroupRolesItemArrayInput is an input type that accepts NodeGroupRolesItemArray and NodeGroupRolesItemArrayOutput values. You can construct a concrete instance of `NodeGroupRolesItemArrayInput` via:

NodeGroupRolesItemArray{ NodeGroupRolesItemArgs{...} }

type NodeGroupRolesItemArrayOutput added in v0.28.0

type NodeGroupRolesItemArrayOutput struct{ *pulumi.OutputState }

func (NodeGroupRolesItemArrayOutput) ElementType added in v0.28.0

func (NodeGroupRolesItemArrayOutput) Index added in v0.28.0

func (NodeGroupRolesItemArrayOutput) ToNodeGroupRolesItemArrayOutput added in v0.28.0

func (o NodeGroupRolesItemArrayOutput) ToNodeGroupRolesItemArrayOutput() NodeGroupRolesItemArrayOutput

func (NodeGroupRolesItemArrayOutput) ToNodeGroupRolesItemArrayOutputWithContext added in v0.28.0

func (o NodeGroupRolesItemArrayOutput) ToNodeGroupRolesItemArrayOutputWithContext(ctx context.Context) NodeGroupRolesItemArrayOutput

type NodeGroupRolesItemInput added in v0.28.0

type NodeGroupRolesItemInput interface {
	pulumi.Input

	ToNodeGroupRolesItemOutput() NodeGroupRolesItemOutput
	ToNodeGroupRolesItemOutputWithContext(context.Context) NodeGroupRolesItemOutput
}

NodeGroupRolesItemInput is an input type that accepts NodeGroupRolesItemArgs and NodeGroupRolesItemOutput values. You can construct a concrete instance of `NodeGroupRolesItemInput` via:

NodeGroupRolesItemArgs{...}

type NodeGroupRolesItemOutput added in v0.28.0

type NodeGroupRolesItemOutput struct{ *pulumi.OutputState }

func (NodeGroupRolesItemOutput) ElementType added in v0.28.0

func (NodeGroupRolesItemOutput) ElementType() reflect.Type

func (NodeGroupRolesItemOutput) ToNodeGroupRolesItemOutput added in v0.28.0

func (o NodeGroupRolesItemOutput) ToNodeGroupRolesItemOutput() NodeGroupRolesItemOutput

func (NodeGroupRolesItemOutput) ToNodeGroupRolesItemOutputWithContext added in v0.28.0

func (o NodeGroupRolesItemOutput) ToNodeGroupRolesItemOutputWithContext(ctx context.Context) NodeGroupRolesItemOutput

func (NodeGroupRolesItemOutput) ToNodeGroupRolesItemPtrOutput added in v0.28.0

func (o NodeGroupRolesItemOutput) ToNodeGroupRolesItemPtrOutput() NodeGroupRolesItemPtrOutput

func (NodeGroupRolesItemOutput) ToNodeGroupRolesItemPtrOutputWithContext added in v0.28.0

func (o NodeGroupRolesItemOutput) ToNodeGroupRolesItemPtrOutputWithContext(ctx context.Context) NodeGroupRolesItemPtrOutput

func (NodeGroupRolesItemOutput) ToStringOutput added in v0.28.0

func (o NodeGroupRolesItemOutput) ToStringOutput() pulumi.StringOutput

func (NodeGroupRolesItemOutput) ToStringOutputWithContext added in v0.28.0

func (o NodeGroupRolesItemOutput) ToStringOutputWithContext(ctx context.Context) pulumi.StringOutput

func (NodeGroupRolesItemOutput) ToStringPtrOutput added in v0.28.0

func (o NodeGroupRolesItemOutput) ToStringPtrOutput() pulumi.StringPtrOutput

func (NodeGroupRolesItemOutput) ToStringPtrOutputWithContext added in v0.28.0

func (o NodeGroupRolesItemOutput) ToStringPtrOutputWithContext(ctx context.Context) pulumi.StringPtrOutput

type NodeGroupRolesItemPtrInput added in v0.28.0

type NodeGroupRolesItemPtrInput interface {
	pulumi.Input

	ToNodeGroupRolesItemPtrOutput() NodeGroupRolesItemPtrOutput
	ToNodeGroupRolesItemPtrOutputWithContext(context.Context) NodeGroupRolesItemPtrOutput
}

func NodeGroupRolesItemPtr added in v0.28.0

func NodeGroupRolesItemPtr(v string) NodeGroupRolesItemPtrInput

type NodeGroupRolesItemPtrOutput added in v0.28.0

type NodeGroupRolesItemPtrOutput struct{ *pulumi.OutputState }

func (NodeGroupRolesItemPtrOutput) Elem added in v0.28.0

func (NodeGroupRolesItemPtrOutput) ElementType added in v0.28.0

func (NodeGroupRolesItemPtrOutput) ToNodeGroupRolesItemPtrOutput added in v0.28.0

func (o NodeGroupRolesItemPtrOutput) ToNodeGroupRolesItemPtrOutput() NodeGroupRolesItemPtrOutput

func (NodeGroupRolesItemPtrOutput) ToNodeGroupRolesItemPtrOutputWithContext added in v0.28.0

func (o NodeGroupRolesItemPtrOutput) ToNodeGroupRolesItemPtrOutputWithContext(ctx context.Context) NodeGroupRolesItemPtrOutput

func (NodeGroupRolesItemPtrOutput) ToStringPtrOutput added in v0.28.0

func (o NodeGroupRolesItemPtrOutput) ToStringPtrOutput() pulumi.StringPtrOutput

func (NodeGroupRolesItemPtrOutput) ToStringPtrOutputWithContext added in v0.28.0

func (o NodeGroupRolesItemPtrOutput) ToStringPtrOutputWithContext(ctx context.Context) pulumi.StringPtrOutput

type NodeGroupState added in v0.28.0

type NodeGroupState struct {
}

func (NodeGroupState) ElementType added in v0.28.0

func (NodeGroupState) ElementType() reflect.Type

type NodeGroupType added in v0.28.0

type NodeGroupType struct {
	// Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labels.
	Labels map[string]string `pulumi:"labels"`
	// The Node group resource name (https://aip.dev/122).
	Name *string `pulumi:"name"`
	// Optional. The node group instance group configuration.
	NodeGroupConfig *InstanceGroupConfig `pulumi:"nodeGroupConfig"`
	// Node group roles.
	Roles []NodeGroupRolesItem `pulumi:"roles"`
}

Dataproc Node Group. The Dataproc NodeGroup resource is not related to the Dataproc NodeGroupAffinity resource.

type NodeGroupTypeArgs added in v0.28.0

type NodeGroupTypeArgs struct {
	// Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labels.
	Labels pulumi.StringMapInput `pulumi:"labels"`
	// The Node group resource name (https://aip.dev/122).
	Name pulumi.StringPtrInput `pulumi:"name"`
	// Optional. The node group instance group configuration.
	NodeGroupConfig InstanceGroupConfigPtrInput `pulumi:"nodeGroupConfig"`
	// Node group roles.
	Roles NodeGroupRolesItemArrayInput `pulumi:"roles"`
}

Dataproc Node Group. The Dataproc NodeGroup resource is not related to the Dataproc NodeGroupAffinity resource.

func (NodeGroupTypeArgs) ElementType added in v0.28.0

func (NodeGroupTypeArgs) ElementType() reflect.Type

func (NodeGroupTypeArgs) ToNodeGroupTypeOutput added in v0.28.0

func (i NodeGroupTypeArgs) ToNodeGroupTypeOutput() NodeGroupTypeOutput

func (NodeGroupTypeArgs) ToNodeGroupTypeOutputWithContext added in v0.28.0

func (i NodeGroupTypeArgs) ToNodeGroupTypeOutputWithContext(ctx context.Context) NodeGroupTypeOutput

type NodeGroupTypeInput added in v0.28.0

type NodeGroupTypeInput interface {
	pulumi.Input

	ToNodeGroupTypeOutput() NodeGroupTypeOutput
	ToNodeGroupTypeOutputWithContext(context.Context) NodeGroupTypeOutput
}

NodeGroupTypeInput is an input type that accepts NodeGroupTypeArgs and NodeGroupTypeOutput values. You can construct a concrete instance of `NodeGroupTypeInput` via:

NodeGroupTypeArgs{...}

type NodeGroupTypeOutput added in v0.28.0

type NodeGroupTypeOutput struct{ *pulumi.OutputState }

Dataproc Node Group. The Dataproc NodeGroup resource is not related to the Dataproc NodeGroupAffinity resource.

func (NodeGroupTypeOutput) ElementType added in v0.28.0

func (NodeGroupTypeOutput) ElementType() reflect.Type

func (NodeGroupTypeOutput) Labels added in v0.28.0

Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labels.

func (NodeGroupTypeOutput) Name added in v0.28.0

The Node group resource name (https://aip.dev/122).

func (NodeGroupTypeOutput) NodeGroupConfig added in v0.28.0

Optional. The node group instance group configuration.

func (NodeGroupTypeOutput) Roles added in v0.28.0

Node group roles.

func (NodeGroupTypeOutput) ToNodeGroupTypeOutput added in v0.28.0

func (o NodeGroupTypeOutput) ToNodeGroupTypeOutput() NodeGroupTypeOutput

func (NodeGroupTypeOutput) ToNodeGroupTypeOutputWithContext added in v0.28.0

func (o NodeGroupTypeOutput) ToNodeGroupTypeOutputWithContext(ctx context.Context) NodeGroupTypeOutput

type NodeInitializationAction

type NodeInitializationAction struct {
	// Cloud Storage URI of executable file.
	ExecutableFile string `pulumi:"executableFile"`
	// Optional. Amount of time the executable has to complete. Default is 10 minutes (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable has not completed by the end of the timeout period.
	ExecutionTimeout *string `pulumi:"executionTimeout"`
}

Specifies an executable to run on a fully configured node and a timeout period for executable completion.

type NodeInitializationActionArgs

type NodeInitializationActionArgs struct {
	// Cloud Storage URI of executable file.
	ExecutableFile pulumi.StringInput `pulumi:"executableFile"`
	// Optional. Amount of time the executable has to complete. Default is 10 minutes (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable has not completed by the end of the timeout period.
	ExecutionTimeout pulumi.StringPtrInput `pulumi:"executionTimeout"`
}

Specifies an executable to run on a fully configured node and a timeout period for executable completion.
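
A minimal sketch of declaring an initialization action list; the script URI is a placeholder, the timeout uses the JSON Duration string form referenced above, and the module import path is an assumption:

	package example

	import (
		dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1" // assumed import path
		"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
	)

	// initActions builds the initialization actions for a cluster config.
	// The script URI is a placeholder; the timeout uses the JSON Duration form.
	func initActions() dataproc.NodeInitializationActionArrayInput {
		return dataproc.NodeInitializationActionArray{
			dataproc.NodeInitializationActionArgs{
				ExecutableFile:   pulumi.String("gs://my-bucket/scripts/install-deps.sh"),
				ExecutionTimeout: pulumi.String("600s"),
			},
		}
	}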

func (NodeInitializationActionArgs) ElementType

func (NodeInitializationActionArgs) ToNodeInitializationActionOutput

func (i NodeInitializationActionArgs) ToNodeInitializationActionOutput() NodeInitializationActionOutput

func (NodeInitializationActionArgs) ToNodeInitializationActionOutputWithContext

func (i NodeInitializationActionArgs) ToNodeInitializationActionOutputWithContext(ctx context.Context) NodeInitializationActionOutput

type NodeInitializationActionArray

type NodeInitializationActionArray []NodeInitializationActionInput

func (NodeInitializationActionArray) ElementType

func (NodeInitializationActionArray) ToNodeInitializationActionArrayOutput

func (i NodeInitializationActionArray) ToNodeInitializationActionArrayOutput() NodeInitializationActionArrayOutput

func (NodeInitializationActionArray) ToNodeInitializationActionArrayOutputWithContext

func (i NodeInitializationActionArray) ToNodeInitializationActionArrayOutputWithContext(ctx context.Context) NodeInitializationActionArrayOutput

type NodeInitializationActionArrayInput

type NodeInitializationActionArrayInput interface {
	pulumi.Input

	ToNodeInitializationActionArrayOutput() NodeInitializationActionArrayOutput
	ToNodeInitializationActionArrayOutputWithContext(context.Context) NodeInitializationActionArrayOutput
}

NodeInitializationActionArrayInput is an input type that accepts NodeInitializationActionArray and NodeInitializationActionArrayOutput values. You can construct a concrete instance of `NodeInitializationActionArrayInput` via:

NodeInitializationActionArray{ NodeInitializationActionArgs{...} }

type NodeInitializationActionArrayOutput

type NodeInitializationActionArrayOutput struct{ *pulumi.OutputState }

func (NodeInitializationActionArrayOutput) ElementType

func (NodeInitializationActionArrayOutput) Index

func (NodeInitializationActionArrayOutput) ToNodeInitializationActionArrayOutput

func (o NodeInitializationActionArrayOutput) ToNodeInitializationActionArrayOutput() NodeInitializationActionArrayOutput

func (NodeInitializationActionArrayOutput) ToNodeInitializationActionArrayOutputWithContext

func (o NodeInitializationActionArrayOutput) ToNodeInitializationActionArrayOutputWithContext(ctx context.Context) NodeInitializationActionArrayOutput

type NodeInitializationActionInput

type NodeInitializationActionInput interface {
	pulumi.Input

	ToNodeInitializationActionOutput() NodeInitializationActionOutput
	ToNodeInitializationActionOutputWithContext(context.Context) NodeInitializationActionOutput
}

NodeInitializationActionInput is an input type that accepts NodeInitializationActionArgs and NodeInitializationActionOutput values. You can construct a concrete instance of `NodeInitializationActionInput` via:

NodeInitializationActionArgs{...}

type NodeInitializationActionOutput

type NodeInitializationActionOutput struct{ *pulumi.OutputState }

Specifies an executable to run on a fully configured node and a timeout period for executable completion.

func (NodeInitializationActionOutput) ElementType

func (NodeInitializationActionOutput) ExecutableFile

Cloud Storage URI of executable file.

func (NodeInitializationActionOutput) ExecutionTimeout

Optional. Amount of time the executable has to complete. Default is 10 minutes (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable has not completed by the end of the timeout period.

func (NodeInitializationActionOutput) ToNodeInitializationActionOutput

func (o NodeInitializationActionOutput) ToNodeInitializationActionOutput() NodeInitializationActionOutput

func (NodeInitializationActionOutput) ToNodeInitializationActionOutputWithContext

func (o NodeInitializationActionOutput) ToNodeInitializationActionOutputWithContext(ctx context.Context) NodeInitializationActionOutput

type NodeInitializationActionResponse

type NodeInitializationActionResponse struct {
	// Cloud Storage URI of executable file.
	ExecutableFile string `pulumi:"executableFile"`
	// Optional. Amount of time the executable has to complete. Default is 10 minutes (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable has not completed by the end of the timeout period.
	ExecutionTimeout string `pulumi:"executionTimeout"`
}

Specifies an executable to run on a fully configured node and a timeout period for executable completion.

type NodeInitializationActionResponseArrayOutput

type NodeInitializationActionResponseArrayOutput struct{ *pulumi.OutputState }

func (NodeInitializationActionResponseArrayOutput) ElementType

func (NodeInitializationActionResponseArrayOutput) Index

func (NodeInitializationActionResponseArrayOutput) ToNodeInitializationActionResponseArrayOutput

func (o NodeInitializationActionResponseArrayOutput) ToNodeInitializationActionResponseArrayOutput() NodeInitializationActionResponseArrayOutput

func (NodeInitializationActionResponseArrayOutput) ToNodeInitializationActionResponseArrayOutputWithContext

func (o NodeInitializationActionResponseArrayOutput) ToNodeInitializationActionResponseArrayOutputWithContext(ctx context.Context) NodeInitializationActionResponseArrayOutput

type NodeInitializationActionResponseOutput

type NodeInitializationActionResponseOutput struct{ *pulumi.OutputState }

Specifies an executable to run on a fully configured node and a timeout period for executable completion.

func (NodeInitializationActionResponseOutput) ElementType

func (NodeInitializationActionResponseOutput) ExecutableFile

Cloud Storage URI of executable file.

func (NodeInitializationActionResponseOutput) ExecutionTimeout

Optional. Amount of time the executable has to complete. Default is 10 minutes (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable has not completed by the end of the timeout period.

func (NodeInitializationActionResponseOutput) ToNodeInitializationActionResponseOutput

func (o NodeInitializationActionResponseOutput) ToNodeInitializationActionResponseOutput() NodeInitializationActionResponseOutput

func (NodeInitializationActionResponseOutput) ToNodeInitializationActionResponseOutputWithContext

func (o NodeInitializationActionResponseOutput) ToNodeInitializationActionResponseOutputWithContext(ctx context.Context) NodeInitializationActionResponseOutput

type OrderedJob

type OrderedJob struct {
	// Optional. Job is a Flink job.
	FlinkJob *FlinkJob `pulumi:"flinkJob"`
	// Optional. Job is a Hadoop job.
	HadoopJob *HadoopJob `pulumi:"hadoopJob"`
	// Optional. Job is a Hive job.
	HiveJob *HiveJob `pulumi:"hiveJob"`
	// Optional. The labels to associate with this job. Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}{0,62}. Label values must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}. No more than 32 labels can be associated with a given job.
	Labels map[string]string `pulumi:"labels"`
	// Optional. Job is a Pig job.
	PigJob *PigJob `pulumi:"pigJob"`
	// Optional. The list of prerequisite job step_ids. If not specified, the job will start at the beginning of the workflow.
	PrerequisiteStepIds []string `pulumi:"prerequisiteStepIds"`
	// Optional. Job is a Presto job.
	PrestoJob *PrestoJob `pulumi:"prestoJob"`
	// Optional. Job is a PySpark job.
	PysparkJob *PySparkJob `pulumi:"pysparkJob"`
	// Optional. Job scheduling configuration.
	Scheduling *JobScheduling `pulumi:"scheduling"`
	// Optional. Job is a Spark job.
	SparkJob *SparkJob `pulumi:"sparkJob"`
	// Optional. Job is a SparkR job.
	SparkRJob *SparkRJob `pulumi:"sparkRJob"`
	// Optional. Job is a SparkSql job.
	SparkSqlJob *SparkSqlJob `pulumi:"sparkSqlJob"`
	// The step ID. The ID must be unique among all jobs within the template. The step ID is used as a prefix for the job ID, as the goog-dataproc-workflow-step-id job label, and in the prerequisiteStepIds field of other steps. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters.
	StepId string `pulumi:"stepId"`
	// Optional. Job is a Trino job.
	TrinoJob *TrinoJob `pulumi:"trinoJob"`
}

A job executed by the workflow.

type OrderedJobArgs

type OrderedJobArgs struct {
	// Optional. Job is a Flink job.
	FlinkJob FlinkJobPtrInput `pulumi:"flinkJob"`
	// Optional. Job is a Hadoop job.
	HadoopJob HadoopJobPtrInput `pulumi:"hadoopJob"`
	// Optional. Job is a Hive job.
	HiveJob HiveJobPtrInput `pulumi:"hiveJob"`
	// Optional. The labels to associate with this job. Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}{0,62}. Label values must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}. No more than 32 labels can be associated with a given job.
	Labels pulumi.StringMapInput `pulumi:"labels"`
	// Optional. Job is a Pig job.
	PigJob PigJobPtrInput `pulumi:"pigJob"`
	// Optional. The list of prerequisite job step_ids. If not specified, the job will start at the beginning of the workflow.
	PrerequisiteStepIds pulumi.StringArrayInput `pulumi:"prerequisiteStepIds"`
	// Optional. Job is a Presto job.
	PrestoJob PrestoJobPtrInput `pulumi:"prestoJob"`
	// Optional. Job is a PySpark job.
	PysparkJob PySparkJobPtrInput `pulumi:"pysparkJob"`
	// Optional. Job scheduling configuration.
	Scheduling JobSchedulingPtrInput `pulumi:"scheduling"`
	// Optional. Job is a Spark job.
	SparkJob SparkJobPtrInput `pulumi:"sparkJob"`
	// Optional. Job is a SparkR job.
	SparkRJob SparkRJobPtrInput `pulumi:"sparkRJob"`
	// Optional. Job is a SparkSql job.
	SparkSqlJob SparkSqlJobPtrInput `pulumi:"sparkSqlJob"`
	// The step ID. The ID must be unique among all jobs within the template. The step ID is used as a prefix for the job ID, as the goog-dataproc-workflow-step-id job label, and in the prerequisiteStepIds field of other steps. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters.
	StepId pulumi.StringInput `pulumi:"stepId"`
	// Optional. Job is a Trino job.
	TrinoJob TrinoJobPtrInput `pulumi:"trinoJob"`
}

A job executed by the workflow.
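
A minimal sketch of one workflow step using only fields documented above; the query file URI, step IDs, and import path are placeholders or assumptions:

	package example

	import (
		dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1" // assumed import path
		"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
	)

	// pigStep defines a workflow step that runs a Pig script after the
	// (hypothetical) "prepare-data" step has completed.
	func pigStep() dataproc.OrderedJobInput {
		return dataproc.OrderedJobArgs{
			StepId:              pulumi.String("run-pig-report"),
			PrerequisiteStepIds: pulumi.StringArray{pulumi.String("prepare-data")},
			PigJob: dataproc.PigJobArgs{
				QueryFileUri: pulumi.String("gs://my-bucket/queries/report.pig"),
			},
			Labels: pulumi.StringMap{"owner": pulumi.String("data-eng")},
		}
	}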

func (OrderedJobArgs) ElementType

func (OrderedJobArgs) ElementType() reflect.Type

func (OrderedJobArgs) ToOrderedJobOutput

func (i OrderedJobArgs) ToOrderedJobOutput() OrderedJobOutput

func (OrderedJobArgs) ToOrderedJobOutputWithContext

func (i OrderedJobArgs) ToOrderedJobOutputWithContext(ctx context.Context) OrderedJobOutput

type OrderedJobArray

type OrderedJobArray []OrderedJobInput

func (OrderedJobArray) ElementType

func (OrderedJobArray) ElementType() reflect.Type

func (OrderedJobArray) ToOrderedJobArrayOutput

func (i OrderedJobArray) ToOrderedJobArrayOutput() OrderedJobArrayOutput

func (OrderedJobArray) ToOrderedJobArrayOutputWithContext

func (i OrderedJobArray) ToOrderedJobArrayOutputWithContext(ctx context.Context) OrderedJobArrayOutput

type OrderedJobArrayInput

type OrderedJobArrayInput interface {
	pulumi.Input

	ToOrderedJobArrayOutput() OrderedJobArrayOutput
	ToOrderedJobArrayOutputWithContext(context.Context) OrderedJobArrayOutput
}

OrderedJobArrayInput is an input type that accepts OrderedJobArray and OrderedJobArrayOutput values. You can construct a concrete instance of `OrderedJobArrayInput` via:

OrderedJobArray{ OrderedJobArgs{...} }

type OrderedJobArrayOutput

type OrderedJobArrayOutput struct{ *pulumi.OutputState }

func (OrderedJobArrayOutput) ElementType

func (OrderedJobArrayOutput) ElementType() reflect.Type

func (OrderedJobArrayOutput) Index

func (OrderedJobArrayOutput) ToOrderedJobArrayOutput

func (o OrderedJobArrayOutput) ToOrderedJobArrayOutput() OrderedJobArrayOutput

func (OrderedJobArrayOutput) ToOrderedJobArrayOutputWithContext

func (o OrderedJobArrayOutput) ToOrderedJobArrayOutputWithContext(ctx context.Context) OrderedJobArrayOutput

type OrderedJobInput

type OrderedJobInput interface {
	pulumi.Input

	ToOrderedJobOutput() OrderedJobOutput
	ToOrderedJobOutputWithContext(context.Context) OrderedJobOutput
}

OrderedJobInput is an input type that accepts OrderedJobArgs and OrderedJobOutput values. You can construct a concrete instance of `OrderedJobInput` via:

OrderedJobArgs{...}

type OrderedJobOutput

type OrderedJobOutput struct{ *pulumi.OutputState }

A job executed by the workflow.

func (OrderedJobOutput) ElementType

func (OrderedJobOutput) ElementType() reflect.Type

func (OrderedJobOutput) FlinkJob added in v0.32.0

func (o OrderedJobOutput) FlinkJob() FlinkJobPtrOutput

Optional. Job is a Flink job.

func (OrderedJobOutput) HadoopJob

func (o OrderedJobOutput) HadoopJob() HadoopJobPtrOutput

Optional. Job is a Hadoop job.

func (OrderedJobOutput) HiveJob

func (o OrderedJobOutput) HiveJob() HiveJobPtrOutput

Optional. Job is a Hive job.

func (OrderedJobOutput) Labels

Optional. The labels to associate with this job. Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}{0,62}. Label values must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}. No more than 32 labels can be associated with a given job.

func (OrderedJobOutput) PigJob

func (o OrderedJobOutput) PigJob() PigJobPtrOutput

Optional. Job is a Pig job.

func (OrderedJobOutput) PrerequisiteStepIds

func (o OrderedJobOutput) PrerequisiteStepIds() pulumi.StringArrayOutput

Optional. The list of prerequisite job step_ids. If not specified, the job will start at the beginning of the workflow.

func (OrderedJobOutput) PrestoJob

func (o OrderedJobOutput) PrestoJob() PrestoJobPtrOutput

Optional. Job is a Presto job.

func (OrderedJobOutput) PysparkJob

func (o OrderedJobOutput) PysparkJob() PySparkJobPtrOutput

Optional. Job is a PySpark job.

func (OrderedJobOutput) Scheduling

Optional. Job scheduling configuration.

func (OrderedJobOutput) SparkJob

func (o OrderedJobOutput) SparkJob() SparkJobPtrOutput

Optional. Job is a Spark job.

func (OrderedJobOutput) SparkRJob

func (o OrderedJobOutput) SparkRJob() SparkRJobPtrOutput

Optional. Job is a SparkR job.

func (OrderedJobOutput) SparkSqlJob

func (o OrderedJobOutput) SparkSqlJob() SparkSqlJobPtrOutput

Optional. Job is a SparkSql job.

func (OrderedJobOutput) StepId

The step ID. The ID must be unique among all jobs within the template. The step ID is used as a prefix for the job ID, as the goog-dataproc-workflow-step-id job label, and in the prerequisiteStepIds field of other steps. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters.

func (OrderedJobOutput) ToOrderedJobOutput

func (o OrderedJobOutput) ToOrderedJobOutput() OrderedJobOutput

func (OrderedJobOutput) ToOrderedJobOutputWithContext

func (o OrderedJobOutput) ToOrderedJobOutputWithContext(ctx context.Context) OrderedJobOutput

func (OrderedJobOutput) TrinoJob added in v0.26.0

func (o OrderedJobOutput) TrinoJob() TrinoJobPtrOutput

Optional. Job is a Trino job.

type OrderedJobResponse

type OrderedJobResponse struct {
	// Optional. Job is a Flink job.
	FlinkJob FlinkJobResponse `pulumi:"flinkJob"`
	// Optional. Job is a Hadoop job.
	HadoopJob HadoopJobResponse `pulumi:"hadoopJob"`
	// Optional. Job is a Hive job.
	HiveJob HiveJobResponse `pulumi:"hiveJob"`
	// Optional. The labels to associate with this job. Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}{0,62}. Label values must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}. No more than 32 labels can be associated with a given job.
	Labels map[string]string `pulumi:"labels"`
	// Optional. Job is a Pig job.
	PigJob PigJobResponse `pulumi:"pigJob"`
	// Optional. The list of prerequisite job step_ids. If not specified, the job will start at the beginning of the workflow.
	PrerequisiteStepIds []string `pulumi:"prerequisiteStepIds"`
	// Optional. Job is a Presto job.
	PrestoJob PrestoJobResponse `pulumi:"prestoJob"`
	// Optional. Job is a PySpark job.
	PysparkJob PySparkJobResponse `pulumi:"pysparkJob"`
	// Optional. Job scheduling configuration.
	Scheduling JobSchedulingResponse `pulumi:"scheduling"`
	// Optional. Job is a Spark job.
	SparkJob SparkJobResponse `pulumi:"sparkJob"`
	// Optional. Job is a SparkR job.
	SparkRJob SparkRJobResponse `pulumi:"sparkRJob"`
	// Optional. Job is a SparkSql job.
	SparkSqlJob SparkSqlJobResponse `pulumi:"sparkSqlJob"`
	// The step ID. The ID must be unique among all jobs within the template. The step ID is used as a prefix for the job ID, as the goog-dataproc-workflow-step-id job label, and in the prerequisiteStepIds field of other steps. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters.
	StepId string `pulumi:"stepId"`
	// Optional. Job is a Trino job.
	TrinoJob TrinoJobResponse `pulumi:"trinoJob"`
}

A job executed by the workflow.

type OrderedJobResponseArrayOutput

type OrderedJobResponseArrayOutput struct{ *pulumi.OutputState }

func (OrderedJobResponseArrayOutput) ElementType

func (OrderedJobResponseArrayOutput) Index

func (OrderedJobResponseArrayOutput) ToOrderedJobResponseArrayOutput

func (o OrderedJobResponseArrayOutput) ToOrderedJobResponseArrayOutput() OrderedJobResponseArrayOutput

func (OrderedJobResponseArrayOutput) ToOrderedJobResponseArrayOutputWithContext

func (o OrderedJobResponseArrayOutput) ToOrderedJobResponseArrayOutputWithContext(ctx context.Context) OrderedJobResponseArrayOutput

type OrderedJobResponseOutput

type OrderedJobResponseOutput struct{ *pulumi.OutputState }

A job executed by the workflow.

func (OrderedJobResponseOutput) ElementType

func (OrderedJobResponseOutput) ElementType() reflect.Type

func (OrderedJobResponseOutput) FlinkJob added in v0.32.0

Optional. Job is a Flink job.

func (OrderedJobResponseOutput) HadoopJob

Optional. Job is a Hadoop job.

func (OrderedJobResponseOutput) HiveJob

Optional. Job is a Hive job.

func (OrderedJobResponseOutput) Labels

Optional. The labels to associate with this job. Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}{0,62}. Label values must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}. No more than 32 labels can be associated with a given job.

func (OrderedJobResponseOutput) PigJob

Optional. Job is a Pig job.

func (OrderedJobResponseOutput) PrerequisiteStepIds

func (o OrderedJobResponseOutput) PrerequisiteStepIds() pulumi.StringArrayOutput

Optional. The list of prerequisite job step_ids. If not specified, the job will start at the beginning of the workflow.

func (OrderedJobResponseOutput) PrestoJob

Optional. Job is a Presto job.

func (OrderedJobResponseOutput) PysparkJob

Optional. Job is a PySpark job.

func (OrderedJobResponseOutput) Scheduling

Optional. Job scheduling configuration.

func (OrderedJobResponseOutput) SparkJob

Optional. Job is a Spark job.

func (OrderedJobResponseOutput) SparkRJob

Optional. Job is a SparkR job.

func (OrderedJobResponseOutput) SparkSqlJob

Optional. Job is a SparkSql job.

func (OrderedJobResponseOutput) StepId

The step ID. The ID must be unique among all jobs within the template. The step ID is used as a prefix for the job ID, as the goog-dataproc-workflow-step-id job label, and in the prerequisiteStepIds field of other steps. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters.

func (OrderedJobResponseOutput) ToOrderedJobResponseOutput

func (o OrderedJobResponseOutput) ToOrderedJobResponseOutput() OrderedJobResponseOutput

func (OrderedJobResponseOutput) ToOrderedJobResponseOutputWithContext

func (o OrderedJobResponseOutput) ToOrderedJobResponseOutputWithContext(ctx context.Context) OrderedJobResponseOutput

func (OrderedJobResponseOutput) TrinoJob added in v0.26.0

Optional. Job is a Trino job.

type ParameterValidation

type ParameterValidation struct {
	// Validation based on regular expressions.
	Regex *RegexValidation `pulumi:"regex"`
	// Validation based on a list of allowed values.
	Values *ValueValidation `pulumi:"values"`
}

Configuration for parameter validation.

type ParameterValidationArgs

type ParameterValidationArgs struct {
	// Validation based on regular expressions.
	Regex RegexValidationPtrInput `pulumi:"regex"`
	// Validation based on a list of allowed values.
	Values ValueValidationPtrInput `pulumi:"values"`
}

Configuration for parameter validation.
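
A minimal sketch of a regex-based validation for a template parameter. RegexValidationArgs and its Regexes field are not shown on this page and are assumed from the underlying Dataproc API; the import path and regex are placeholders:

	package example

	import (
		dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1" // assumed import path
		"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
	)

	// zoneValidation restricts a template parameter to us-central1 zones.
	// RegexValidationArgs and its Regexes field are assumptions here.
	func zoneValidation() dataproc.ParameterValidationInput {
		return dataproc.ParameterValidationArgs{
			Regex: dataproc.RegexValidationArgs{
				Regexes: pulumi.StringArray{pulumi.String("us-central1-[a-f]")},
			},
		}
	}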

func (ParameterValidationArgs) ElementType

func (ParameterValidationArgs) ElementType() reflect.Type

func (ParameterValidationArgs) ToParameterValidationOutput

func (i ParameterValidationArgs) ToParameterValidationOutput() ParameterValidationOutput

func (ParameterValidationArgs) ToParameterValidationOutputWithContext

func (i ParameterValidationArgs) ToParameterValidationOutputWithContext(ctx context.Context) ParameterValidationOutput

func (ParameterValidationArgs) ToParameterValidationPtrOutput

func (i ParameterValidationArgs) ToParameterValidationPtrOutput() ParameterValidationPtrOutput

func (ParameterValidationArgs) ToParameterValidationPtrOutputWithContext

func (i ParameterValidationArgs) ToParameterValidationPtrOutputWithContext(ctx context.Context) ParameterValidationPtrOutput

type ParameterValidationInput

type ParameterValidationInput interface {
	pulumi.Input

	ToParameterValidationOutput() ParameterValidationOutput
	ToParameterValidationOutputWithContext(context.Context) ParameterValidationOutput
}

ParameterValidationInput is an input type that accepts ParameterValidationArgs and ParameterValidationOutput values. You can construct a concrete instance of `ParameterValidationInput` via:

ParameterValidationArgs{...}

type ParameterValidationOutput

type ParameterValidationOutput struct{ *pulumi.OutputState }

Configuration for parameter validation.

func (ParameterValidationOutput) ElementType

func (ParameterValidationOutput) ElementType() reflect.Type

func (ParameterValidationOutput) Regex

Validation based on regular expressions.

func (ParameterValidationOutput) ToParameterValidationOutput

func (o ParameterValidationOutput) ToParameterValidationOutput() ParameterValidationOutput

func (ParameterValidationOutput) ToParameterValidationOutputWithContext

func (o ParameterValidationOutput) ToParameterValidationOutputWithContext(ctx context.Context) ParameterValidationOutput

func (ParameterValidationOutput) ToParameterValidationPtrOutput

func (o ParameterValidationOutput) ToParameterValidationPtrOutput() ParameterValidationPtrOutput

func (ParameterValidationOutput) ToParameterValidationPtrOutputWithContext

func (o ParameterValidationOutput) ToParameterValidationPtrOutputWithContext(ctx context.Context) ParameterValidationPtrOutput

func (ParameterValidationOutput) Values

Validation based on a list of allowed values.

type ParameterValidationPtrInput

type ParameterValidationPtrInput interface {
	pulumi.Input

	ToParameterValidationPtrOutput() ParameterValidationPtrOutput
	ToParameterValidationPtrOutputWithContext(context.Context) ParameterValidationPtrOutput
}

ParameterValidationPtrInput is an input type that accepts ParameterValidationArgs, ParameterValidationPtr and ParameterValidationPtrOutput values. You can construct a concrete instance of `ParameterValidationPtrInput` via:

        ParameterValidationArgs{...}

or:

        nil

type ParameterValidationPtrOutput

type ParameterValidationPtrOutput struct{ *pulumi.OutputState }

func (ParameterValidationPtrOutput) Elem

func (ParameterValidationPtrOutput) ElementType

func (ParameterValidationPtrOutput) Regex

Validation based on regular expressions.

func (ParameterValidationPtrOutput) ToParameterValidationPtrOutput

func (o ParameterValidationPtrOutput) ToParameterValidationPtrOutput() ParameterValidationPtrOutput

func (ParameterValidationPtrOutput) ToParameterValidationPtrOutputWithContext

func (o ParameterValidationPtrOutput) ToParameterValidationPtrOutputWithContext(ctx context.Context) ParameterValidationPtrOutput

func (ParameterValidationPtrOutput) Values

Validation based on a list of allowed values.

type ParameterValidationResponse

type ParameterValidationResponse struct {
	// Validation based on regular expressions.
	Regex RegexValidationResponse `pulumi:"regex"`
	// Validation based on a list of allowed values.
	Values ValueValidationResponse `pulumi:"values"`
}

Configuration for parameter validation.

type ParameterValidationResponseOutput

type ParameterValidationResponseOutput struct{ *pulumi.OutputState }

Configuration for parameter validation.

func (ParameterValidationResponseOutput) ElementType

func (ParameterValidationResponseOutput) Regex

Validation based on regular expressions.

func (ParameterValidationResponseOutput) ToParameterValidationResponseOutput

func (o ParameterValidationResponseOutput) ToParameterValidationResponseOutput() ParameterValidationResponseOutput

func (ParameterValidationResponseOutput) ToParameterValidationResponseOutputWithContext

func (o ParameterValidationResponseOutput) ToParameterValidationResponseOutputWithContext(ctx context.Context) ParameterValidationResponseOutput

func (ParameterValidationResponseOutput) Values

Validation based on a list of allowed values.

type PeripheralsConfig added in v0.12.0

type PeripheralsConfig struct {
	// Optional. Resource name of an existing Dataproc Metastore service. Example: projects/[project_id]/locations/[region]/services/[service_id]
	MetastoreService *string `pulumi:"metastoreService"`
	// Optional. The Spark History Server configuration for the workload.
	SparkHistoryServerConfig *SparkHistoryServerConfig `pulumi:"sparkHistoryServerConfig"`
}

Auxiliary services configuration for a workload.

type PeripheralsConfigArgs added in v0.12.0

type PeripheralsConfigArgs struct {
	// Optional. Resource name of an existing Dataproc Metastore service. Example: projects/[project_id]/locations/[region]/services/[service_id]
	MetastoreService pulumi.StringPtrInput `pulumi:"metastoreService"`
	// Optional. The Spark History Server configuration for the workload.
	SparkHistoryServerConfig SparkHistoryServerConfigPtrInput `pulumi:"sparkHistoryServerConfig"`
}

Auxiliary services configuration for a workload.
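
A minimal sketch wiring an existing Dataproc Metastore service into a workload; the service path and import path are placeholders/assumptions:

	package example

	import (
		dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1" // assumed import path
		"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
	)

	// peripherals points the workload at an existing Dataproc Metastore service.
	func peripherals() dataproc.PeripheralsConfigPtrInput {
		return dataproc.PeripheralsConfigArgs{
			MetastoreService: pulumi.String("projects/my-project/locations/us-central1/services/my-metastore"),
		}
	}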

func (PeripheralsConfigArgs) ElementType added in v0.12.0

func (PeripheralsConfigArgs) ElementType() reflect.Type

func (PeripheralsConfigArgs) ToPeripheralsConfigOutput added in v0.12.0

func (i PeripheralsConfigArgs) ToPeripheralsConfigOutput() PeripheralsConfigOutput

func (PeripheralsConfigArgs) ToPeripheralsConfigOutputWithContext added in v0.12.0

func (i PeripheralsConfigArgs) ToPeripheralsConfigOutputWithContext(ctx context.Context) PeripheralsConfigOutput

func (PeripheralsConfigArgs) ToPeripheralsConfigPtrOutput added in v0.12.0

func (i PeripheralsConfigArgs) ToPeripheralsConfigPtrOutput() PeripheralsConfigPtrOutput

func (PeripheralsConfigArgs) ToPeripheralsConfigPtrOutputWithContext added in v0.12.0

func (i PeripheralsConfigArgs) ToPeripheralsConfigPtrOutputWithContext(ctx context.Context) PeripheralsConfigPtrOutput

type PeripheralsConfigInput added in v0.12.0

type PeripheralsConfigInput interface {
	pulumi.Input

	ToPeripheralsConfigOutput() PeripheralsConfigOutput
	ToPeripheralsConfigOutputWithContext(context.Context) PeripheralsConfigOutput
}

PeripheralsConfigInput is an input type that accepts PeripheralsConfigArgs and PeripheralsConfigOutput values. You can construct a concrete instance of `PeripheralsConfigInput` via:

PeripheralsConfigArgs{...}

type PeripheralsConfigOutput added in v0.12.0

type PeripheralsConfigOutput struct{ *pulumi.OutputState }

Auxiliary services configuration for a workload.

func (PeripheralsConfigOutput) ElementType added in v0.12.0

func (PeripheralsConfigOutput) ElementType() reflect.Type

func (PeripheralsConfigOutput) MetastoreService added in v0.12.0

func (o PeripheralsConfigOutput) MetastoreService() pulumi.StringPtrOutput

Optional. Resource name of an existing Dataproc Metastore service. Example: projects/[project_id]/locations/[region]/services/[service_id]

func (PeripheralsConfigOutput) SparkHistoryServerConfig added in v0.12.0

func (o PeripheralsConfigOutput) SparkHistoryServerConfig() SparkHistoryServerConfigPtrOutput

Optional. The Spark History Server configuration for the workload.

func (PeripheralsConfigOutput) ToPeripheralsConfigOutput added in v0.12.0

func (o PeripheralsConfigOutput) ToPeripheralsConfigOutput() PeripheralsConfigOutput

func (PeripheralsConfigOutput) ToPeripheralsConfigOutputWithContext added in v0.12.0

func (o PeripheralsConfigOutput) ToPeripheralsConfigOutputWithContext(ctx context.Context) PeripheralsConfigOutput

func (PeripheralsConfigOutput) ToPeripheralsConfigPtrOutput added in v0.12.0

func (o PeripheralsConfigOutput) ToPeripheralsConfigPtrOutput() PeripheralsConfigPtrOutput

func (PeripheralsConfigOutput) ToPeripheralsConfigPtrOutputWithContext added in v0.12.0

func (o PeripheralsConfigOutput) ToPeripheralsConfigPtrOutputWithContext(ctx context.Context) PeripheralsConfigPtrOutput

type PeripheralsConfigPtrInput added in v0.12.0

type PeripheralsConfigPtrInput interface {
	pulumi.Input

	ToPeripheralsConfigPtrOutput() PeripheralsConfigPtrOutput
	ToPeripheralsConfigPtrOutputWithContext(context.Context) PeripheralsConfigPtrOutput
}

PeripheralsConfigPtrInput is an input type that accepts PeripheralsConfigArgs, PeripheralsConfigPtr and PeripheralsConfigPtrOutput values. You can construct a concrete instance of `PeripheralsConfigPtrInput` via:

        PeripheralsConfigArgs{...}

or:

        nil

func PeripheralsConfigPtr added in v0.12.0

func PeripheralsConfigPtr(v *PeripheralsConfigArgs) PeripheralsConfigPtrInput

type PeripheralsConfigPtrOutput added in v0.12.0

type PeripheralsConfigPtrOutput struct{ *pulumi.OutputState }

func (PeripheralsConfigPtrOutput) Elem added in v0.12.0

func (PeripheralsConfigPtrOutput) ElementType added in v0.12.0

func (PeripheralsConfigPtrOutput) ElementType() reflect.Type

func (PeripheralsConfigPtrOutput) MetastoreService added in v0.12.0

func (o PeripheralsConfigPtrOutput) MetastoreService() pulumi.StringPtrOutput

Optional. Resource name of an existing Dataproc Metastore service. Example: projects/[project_id]/locations/[region]/services/[service_id]

func (PeripheralsConfigPtrOutput) SparkHistoryServerConfig added in v0.12.0

Optional. The Spark History Server configuration for the workload.

func (PeripheralsConfigPtrOutput) ToPeripheralsConfigPtrOutput added in v0.12.0

func (o PeripheralsConfigPtrOutput) ToPeripheralsConfigPtrOutput() PeripheralsConfigPtrOutput

func (PeripheralsConfigPtrOutput) ToPeripheralsConfigPtrOutputWithContext added in v0.12.0

func (o PeripheralsConfigPtrOutput) ToPeripheralsConfigPtrOutputWithContext(ctx context.Context) PeripheralsConfigPtrOutput

type PeripheralsConfigResponse added in v0.12.0

type PeripheralsConfigResponse struct {
	// Optional. Resource name of an existing Dataproc Metastore service. Example: projects/[project_id]/locations/[region]/services/[service_id]
	MetastoreService string `pulumi:"metastoreService"`
	// Optional. The Spark History Server configuration for the workload.
	SparkHistoryServerConfig SparkHistoryServerConfigResponse `pulumi:"sparkHistoryServerConfig"`
}

Auxiliary services configuration for a workload.

type PeripheralsConfigResponseOutput added in v0.12.0

type PeripheralsConfigResponseOutput struct{ *pulumi.OutputState }

Auxiliary services configuration for a workload.

func (PeripheralsConfigResponseOutput) ElementType added in v0.12.0

func (PeripheralsConfigResponseOutput) MetastoreService added in v0.12.0

Optional. Resource name of an existing Dataproc Metastore service. Example: projects/[project_id]/locations/[region]/services/[service_id]

func (PeripheralsConfigResponseOutput) SparkHistoryServerConfig added in v0.12.0

Optional. The Spark History Server configuration for the workload.

func (PeripheralsConfigResponseOutput) ToPeripheralsConfigResponseOutput added in v0.12.0

func (o PeripheralsConfigResponseOutput) ToPeripheralsConfigResponseOutput() PeripheralsConfigResponseOutput

func (PeripheralsConfigResponseOutput) ToPeripheralsConfigResponseOutputWithContext added in v0.12.0

func (o PeripheralsConfigResponseOutput) ToPeripheralsConfigResponseOutputWithContext(ctx context.Context) PeripheralsConfigResponseOutput

type PigJob

type PigJob struct {
	// Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
	ContinueOnFailure *bool `pulumi:"continueOnFailure"`
	// Optional. HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
	JarFileUris []string `pulumi:"jarFileUris"`
	// Optional. The runtime log config for job execution.
	LoggingConfig *LoggingConfig `pulumi:"loggingConfig"`
	// Optional. A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code.
	Properties map[string]string `pulumi:"properties"`
	// The HCFS URI of the script that contains the Pig queries.
	QueryFileUri *string `pulumi:"queryFileUri"`
	// A list of queries.
	QueryList *QueryList `pulumi:"queryList"`
	// Optional. Mapping of query variable names to values (equivalent to the Pig command: name=[value]).
	ScriptVariables map[string]string `pulumi:"scriptVariables"`
}

A Dataproc job for running Apache Pig (https://pig.apache.org/) queries on YARN.

type PigJobArgs

type PigJobArgs struct {
	// Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
	ContinueOnFailure pulumi.BoolPtrInput `pulumi:"continueOnFailure"`
	// Optional. HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
	JarFileUris pulumi.StringArrayInput `pulumi:"jarFileUris"`
	// Optional. The runtime log config for job execution.
	LoggingConfig LoggingConfigPtrInput `pulumi:"loggingConfig"`
	// Optional. A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code.
	Properties pulumi.StringMapInput `pulumi:"properties"`
	// The HCFS URI of the script that contains the Pig queries.
	QueryFileUri pulumi.StringPtrInput `pulumi:"queryFileUri"`
	// A list of queries.
	QueryList QueryListPtrInput `pulumi:"queryList"`
	// Optional. Mapping of query variable names to values (equivalent to the Pig command: name=[value]).
	ScriptVariables pulumi.StringMapInput `pulumi:"scriptVariables"`
}

A Dataproc job for running Apache Pig (https://pig.apache.org/) queries on YARN.
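
A minimal sketch of a Pig job definition using only fields documented above; the URIs, script variable, and import path are placeholders/assumptions:

	package example

	import (
		dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1" // assumed import path
		"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
	)

	// pigJob runs a Pig script with one script variable and an extra UDF jar.
	func pigJob() dataproc.PigJobPtrInput {
		return dataproc.PigJobArgs{
			QueryFileUri:      pulumi.String("gs://my-bucket/queries/report.pig"),
			ScriptVariables:   pulumi.StringMap{"run_date": pulumi.String("2023-11-29")},
			JarFileUris:       pulumi.StringArray{pulumi.String("gs://my-bucket/udfs/my-udfs.jar")},
			ContinueOnFailure: pulumi.Bool(false),
		}
	}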

func (PigJobArgs) ElementType

func (PigJobArgs) ElementType() reflect.Type

func (PigJobArgs) ToPigJobOutput

func (i PigJobArgs) ToPigJobOutput() PigJobOutput

func (PigJobArgs) ToPigJobOutputWithContext

func (i PigJobArgs) ToPigJobOutputWithContext(ctx context.Context) PigJobOutput

func (PigJobArgs) ToPigJobPtrOutput

func (i PigJobArgs) ToPigJobPtrOutput() PigJobPtrOutput

func (PigJobArgs) ToPigJobPtrOutputWithContext

func (i PigJobArgs) ToPigJobPtrOutputWithContext(ctx context.Context) PigJobPtrOutput

type PigJobInput

type PigJobInput interface {
	pulumi.Input

	ToPigJobOutput() PigJobOutput
	ToPigJobOutputWithContext(context.Context) PigJobOutput
}

PigJobInput is an input type that accepts PigJobArgs and PigJobOutput values. You can construct a concrete instance of `PigJobInput` via:

PigJobArgs{...}
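
For orientation only, here is a minimal sketch (not taken from the package) of building a PigJobInput from literal values. It assumes the helpers from github.com/pulumi/pulumi/sdk/v3/go/pulumi and writes the identifiers unqualified, as if from within this package; the query text and variable values are placeholders.

// Construct a PigJobInput; PigJobArgs satisfies the interface directly.
var examplePigJob PigJobInput = PigJobArgs{
	// Inline queries instead of pointing QueryFileUri at a script (placeholder Pig command).
	QueryList: QueryListArgs{
		Queries: pulumi.StringArray{
			pulumi.String("fs -ls /"),
		},
	},
	// Variables substituted into the queries as name=[value] pairs (placeholder values).
	ScriptVariables: pulumi.StringMap{
		"output_dir": pulumi.String("gs://my-bucket/pig-output"),
	},
	ContinueOnFailure: pulumi.Bool(false),
}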

type PigJobOutput

type PigJobOutput struct{ *pulumi.OutputState }

A Dataproc job for running Apache Pig (https://pig.apache.org/) queries on YARN.

func (PigJobOutput) ContinueOnFailure

func (o PigJobOutput) ContinueOnFailure() pulumi.BoolPtrOutput

Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.

func (PigJobOutput) ElementType

func (PigJobOutput) ElementType() reflect.Type

func (PigJobOutput) JarFileUris

func (o PigJobOutput) JarFileUris() pulumi.StringArrayOutput

Optional. HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.

func (PigJobOutput) LoggingConfig

func (o PigJobOutput) LoggingConfig() LoggingConfigPtrOutput

Optional. The runtime log config for job execution.

func (PigJobOutput) Properties

func (o PigJobOutput) Properties() pulumi.StringMapOutput

Optional. A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code.

func (PigJobOutput) QueryFileUri

func (o PigJobOutput) QueryFileUri() pulumi.StringPtrOutput

The HCFS URI of the script that contains the Pig queries.

func (PigJobOutput) QueryList

func (o PigJobOutput) QueryList() QueryListPtrOutput

A list of queries.

func (PigJobOutput) ScriptVariables

func (o PigJobOutput) ScriptVariables() pulumi.StringMapOutput

Optional. Mapping of query variable names to values (equivalent to the Pig command: name=[value]).

func (PigJobOutput) ToPigJobOutput

func (o PigJobOutput) ToPigJobOutput() PigJobOutput

func (PigJobOutput) ToPigJobOutputWithContext

func (o PigJobOutput) ToPigJobOutputWithContext(ctx context.Context) PigJobOutput

func (PigJobOutput) ToPigJobPtrOutput

func (o PigJobOutput) ToPigJobPtrOutput() PigJobPtrOutput

func (PigJobOutput) ToPigJobPtrOutputWithContext

func (o PigJobOutput) ToPigJobPtrOutputWithContext(ctx context.Context) PigJobPtrOutput

type PigJobPtrInput

type PigJobPtrInput interface {
	pulumi.Input

	ToPigJobPtrOutput() PigJobPtrOutput
	ToPigJobPtrOutputWithContext(context.Context) PigJobPtrOutput
}

PigJobPtrInput is an input type that accepts PigJobArgs, PigJobPtr and PigJobPtrOutput values. You can construct a concrete instance of `PigJobPtrInput` via:

        PigJobArgs{...}

or:

        nil

func PigJobPtr

func PigJobPtr(v *PigJobArgs) PigJobPtrInput
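
As a hedged sketch, the pointer-style input is convenient when the Pig job is optional; the same pattern applies to the other *Ptr helpers in this package. The URI below is a placeholder.

// Wrap literal args as an optional PigJobPtrInput.
var optionalPigJob = PigJobPtr(&PigJobArgs{
	QueryFileUri: pulumi.String("gs://my-bucket/queries.pig"), // placeholder URI
})

// nil is also a valid PigJobPtrInput, meaning no Pig job is configured.
var noPigJob PigJobPtrInput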

type PigJobPtrOutput

type PigJobPtrOutput struct{ *pulumi.OutputState }

func (PigJobPtrOutput) ContinueOnFailure

func (o PigJobPtrOutput) ContinueOnFailure() pulumi.BoolPtrOutput

Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.

func (PigJobPtrOutput) Elem

func (o PigJobPtrOutput) Elem() PigJobOutput

func (PigJobPtrOutput) ElementType

func (PigJobPtrOutput) ElementType() reflect.Type

func (PigJobPtrOutput) JarFileUris

func (o PigJobPtrOutput) JarFileUris() pulumi.StringArrayOutput

Optional. HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.

func (PigJobPtrOutput) LoggingConfig

func (o PigJobPtrOutput) LoggingConfig() LoggingConfigPtrOutput

Optional. The runtime log config for job execution.

func (PigJobPtrOutput) Properties

func (o PigJobPtrOutput) Properties() pulumi.StringMapOutput

Optional. A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code.

func (PigJobPtrOutput) QueryFileUri

func (o PigJobPtrOutput) QueryFileUri() pulumi.StringPtrOutput

The HCFS URI of the script that contains the Pig queries.

func (PigJobPtrOutput) QueryList

func (o PigJobPtrOutput) QueryList() QueryListPtrOutput

A list of queries.

func (PigJobPtrOutput) ScriptVariables

func (o PigJobPtrOutput) ScriptVariables() pulumi.StringMapOutput

Optional. Mapping of query variable names to values (equivalent to the Pig command: name=[value]).

func (PigJobPtrOutput) ToPigJobPtrOutput

func (o PigJobPtrOutput) ToPigJobPtrOutput() PigJobPtrOutput

func (PigJobPtrOutput) ToPigJobPtrOutputWithContext

func (o PigJobPtrOutput) ToPigJobPtrOutputWithContext(ctx context.Context) PigJobPtrOutput

type PigJobResponse

type PigJobResponse struct {
	// Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
	ContinueOnFailure bool `pulumi:"continueOnFailure"`
	// Optional. HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
	JarFileUris []string `pulumi:"jarFileUris"`
	// Optional. The runtime log config for job execution.
	LoggingConfig LoggingConfigResponse `pulumi:"loggingConfig"`
	// Optional. A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code.
	Properties map[string]string `pulumi:"properties"`
	// The HCFS URI of the script that contains the Pig queries.
	QueryFileUri string `pulumi:"queryFileUri"`
	// A list of queries.
	QueryList QueryListResponse `pulumi:"queryList"`
	// Optional. Mapping of query variable names to values (equivalent to the Pig command: name=[value]).
	ScriptVariables map[string]string `pulumi:"scriptVariables"`
}

A Dataproc job for running Apache Pig (https://pig.apache.org/) queries on YARN.

type PigJobResponseOutput

type PigJobResponseOutput struct{ *pulumi.OutputState }

A Dataproc job for running Apache Pig (https://pig.apache.org/) queries on YARN.

func (PigJobResponseOutput) ContinueOnFailure

func (o PigJobResponseOutput) ContinueOnFailure() pulumi.BoolOutput

Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.

func (PigJobResponseOutput) ElementType

func (PigJobResponseOutput) ElementType() reflect.Type

func (PigJobResponseOutput) JarFileUris

Optional. HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.

func (PigJobResponseOutput) LoggingConfig

Optional. The runtime log config for job execution.

func (PigJobResponseOutput) Properties

Optional. A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code.

func (PigJobResponseOutput) QueryFileUri

func (o PigJobResponseOutput) QueryFileUri() pulumi.StringOutput

The HCFS URI of the script that contains the Pig queries.

func (PigJobResponseOutput) QueryList

A list of queries.

func (PigJobResponseOutput) ScriptVariables

func (o PigJobResponseOutput) ScriptVariables() pulumi.StringMapOutput

Optional. Mapping of query variable names to values (equivalent to the Pig command: name=[value]).

func (PigJobResponseOutput) ToPigJobResponseOutput

func (o PigJobResponseOutput) ToPigJobResponseOutput() PigJobResponseOutput

func (PigJobResponseOutput) ToPigJobResponseOutputWithContext

func (o PigJobResponseOutput) ToPigJobResponseOutputWithContext(ctx context.Context) PigJobResponseOutput

type PrestoJob

type PrestoJob struct {
	// Optional. Presto client tags to attach to this query
	ClientTags []string `pulumi:"clientTags"`
	// Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
	ContinueOnFailure *bool `pulumi:"continueOnFailure"`
	// Optional. The runtime log config for job execution.
	LoggingConfig *LoggingConfig `pulumi:"loggingConfig"`
	// Optional. The format in which query output will be displayed. See the Presto documentation for supported output formats
	OutputFormat *string `pulumi:"outputFormat"`
	// Optional. A mapping of property names to values. Used to set Presto session properties (https://prestodb.io/docs/current/sql/set-session.html). Equivalent to using the --session flag in the Presto CLI.
	Properties map[string]string `pulumi:"properties"`
	// The HCFS URI of the script that contains SQL queries.
	QueryFileUri *string `pulumi:"queryFileUri"`
	// A list of queries.
	QueryList *QueryList `pulumi:"queryList"`
}

A Dataproc job for running Presto (https://prestosql.io/) queries. IMPORTANT: The Dataproc Presto Optional Component (https://cloud.google.com/dataproc/docs/concepts/components/presto) must be enabled when the cluster is created to submit a Presto job to the cluster.

type PrestoJobArgs

type PrestoJobArgs struct {
	// Optional. Presto client tags to attach to this query
	ClientTags pulumi.StringArrayInput `pulumi:"clientTags"`
	// Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
	ContinueOnFailure pulumi.BoolPtrInput `pulumi:"continueOnFailure"`
	// Optional. The runtime log config for job execution.
	LoggingConfig LoggingConfigPtrInput `pulumi:"loggingConfig"`
	// Optional. The format in which query output will be displayed. See the Presto documentation for supported output formats
	OutputFormat pulumi.StringPtrInput `pulumi:"outputFormat"`
	// Optional. A mapping of property names to values. Used to set Presto session properties (https://prestodb.io/docs/current/sql/set-session.html). Equivalent to using the --session flag in the Presto CLI.
	Properties pulumi.StringMapInput `pulumi:"properties"`
	// The HCFS URI of the script that contains SQL queries.
	QueryFileUri pulumi.StringPtrInput `pulumi:"queryFileUri"`
	// A list of queries.
	QueryList QueryListPtrInput `pulumi:"queryList"`
}

A Dataproc job for running Presto (https://prestosql.io/) queries. IMPORTANT: The Dataproc Presto Optional Component (https://cloud.google.com/dataproc/docs/concepts/components/presto) must be enabled when the cluster is created to submit a Presto job to the cluster.

func (PrestoJobArgs) ElementType

func (PrestoJobArgs) ElementType() reflect.Type

func (PrestoJobArgs) ToPrestoJobOutput

func (i PrestoJobArgs) ToPrestoJobOutput() PrestoJobOutput

func (PrestoJobArgs) ToPrestoJobOutputWithContext

func (i PrestoJobArgs) ToPrestoJobOutputWithContext(ctx context.Context) PrestoJobOutput

func (PrestoJobArgs) ToPrestoJobPtrOutput

func (i PrestoJobArgs) ToPrestoJobPtrOutput() PrestoJobPtrOutput

func (PrestoJobArgs) ToPrestoJobPtrOutputWithContext

func (i PrestoJobArgs) ToPrestoJobPtrOutputWithContext(ctx context.Context) PrestoJobPtrOutput

type PrestoJobInput

type PrestoJobInput interface {
	pulumi.Input

	ToPrestoJobOutput() PrestoJobOutput
	ToPrestoJobOutputWithContext(context.Context) PrestoJobOutput
}

PrestoJobInput is an input type that accepts PrestoJobArgs and PrestoJobOutput values. You can construct a concrete instance of `PrestoJobInput` via:

PrestoJobArgs{...}
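
A minimal sketch, again written as if from within this package; the client tag, session property value, query text, and output format are placeholders chosen for illustration.

// Construct a PrestoJobInput; PrestoJobArgs satisfies the interface directly.
var examplePrestoJob PrestoJobInput = PrestoJobArgs{
	ClientTags: pulumi.StringArray{pulumi.String("reporting")}, // placeholder tag
	// Session properties, equivalent to --session on the Presto CLI.
	Properties: pulumi.StringMap{
		"query_max_run_time": pulumi.String("30m"),
	},
	QueryList: QueryListArgs{
		Queries: pulumi.StringArray{pulumi.String("SELECT 1")},
	},
	OutputFormat: pulumi.String("CSV"),
}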

type PrestoJobOutput

type PrestoJobOutput struct{ *pulumi.OutputState }

A Dataproc job for running Presto (https://prestosql.io/) queries. IMPORTANT: The Dataproc Presto Optional Component (https://cloud.google.com/dataproc/docs/concepts/components/presto) must be enabled when the cluster is created to submit a Presto job to the cluster.

func (PrestoJobOutput) ClientTags

func (o PrestoJobOutput) ClientTags() pulumi.StringArrayOutput

Optional. Presto client tags to attach to this query

func (PrestoJobOutput) ContinueOnFailure

func (o PrestoJobOutput) ContinueOnFailure() pulumi.BoolPtrOutput

Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.

func (PrestoJobOutput) ElementType

func (PrestoJobOutput) ElementType() reflect.Type

func (PrestoJobOutput) LoggingConfig

func (o PrestoJobOutput) LoggingConfig() LoggingConfigPtrOutput

Optional. The runtime log config for job execution.

func (PrestoJobOutput) OutputFormat

func (o PrestoJobOutput) OutputFormat() pulumi.StringPtrOutput

Optional. The format in which query output will be displayed. See the Presto documentation for supported output formats

func (PrestoJobOutput) Properties

func (o PrestoJobOutput) Properties() pulumi.StringMapOutput

Optional. A mapping of property names to values. Used to set Presto session properties (https://prestodb.io/docs/current/sql/set-session.html). Equivalent to using the --session flag in the Presto CLI.

func (PrestoJobOutput) QueryFileUri

func (o PrestoJobOutput) QueryFileUri() pulumi.StringPtrOutput

The HCFS URI of the script that contains SQL queries.

func (PrestoJobOutput) QueryList

func (o PrestoJobOutput) QueryList() QueryListPtrOutput

A list of queries.

func (PrestoJobOutput) ToPrestoJobOutput

func (o PrestoJobOutput) ToPrestoJobOutput() PrestoJobOutput

func (PrestoJobOutput) ToPrestoJobOutputWithContext

func (o PrestoJobOutput) ToPrestoJobOutputWithContext(ctx context.Context) PrestoJobOutput

func (PrestoJobOutput) ToPrestoJobPtrOutput

func (o PrestoJobOutput) ToPrestoJobPtrOutput() PrestoJobPtrOutput

func (PrestoJobOutput) ToPrestoJobPtrOutputWithContext

func (o PrestoJobOutput) ToPrestoJobPtrOutputWithContext(ctx context.Context) PrestoJobPtrOutput

type PrestoJobPtrInput

type PrestoJobPtrInput interface {
	pulumi.Input

	ToPrestoJobPtrOutput() PrestoJobPtrOutput
	ToPrestoJobPtrOutputWithContext(context.Context) PrestoJobPtrOutput
}

PrestoJobPtrInput is an input type that accepts PrestoJobArgs, PrestoJobPtr and PrestoJobPtrOutput values. You can construct a concrete instance of `PrestoJobPtrInput` via:

        PrestoJobArgs{...}

or:

        nil

func PrestoJobPtr

func PrestoJobPtr(v *PrestoJobArgs) PrestoJobPtrInput

type PrestoJobPtrOutput

type PrestoJobPtrOutput struct{ *pulumi.OutputState }

func (PrestoJobPtrOutput) ClientTags

Optional. Presto client tags to attach to this query

func (PrestoJobPtrOutput) ContinueOnFailure

func (o PrestoJobPtrOutput) ContinueOnFailure() pulumi.BoolPtrOutput

Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.

func (PrestoJobPtrOutput) Elem

func (PrestoJobPtrOutput) ElementType

func (PrestoJobPtrOutput) ElementType() reflect.Type

func (PrestoJobPtrOutput) LoggingConfig

func (o PrestoJobPtrOutput) LoggingConfig() LoggingConfigPtrOutput

Optional. The runtime log config for job execution.

func (PrestoJobPtrOutput) OutputFormat

func (o PrestoJobPtrOutput) OutputFormat() pulumi.StringPtrOutput

Optional. The format in which query output will be displayed. See the Presto documentation for supported output formats

func (PrestoJobPtrOutput) Properties

func (o PrestoJobPtrOutput) Properties() pulumi.StringMapOutput

Optional. A mapping of property names to values. Used to set Presto session properties (https://prestodb.io/docs/current/sql/set-session.html). Equivalent to using the --session flag in the Presto CLI.

func (PrestoJobPtrOutput) QueryFileUri

func (o PrestoJobPtrOutput) QueryFileUri() pulumi.StringPtrOutput

The HCFS URI of the script that contains SQL queries.

func (PrestoJobPtrOutput) QueryList

func (o PrestoJobPtrOutput) QueryList() QueryListPtrOutput

A list of queries.

func (PrestoJobPtrOutput) ToPrestoJobPtrOutput

func (o PrestoJobPtrOutput) ToPrestoJobPtrOutput() PrestoJobPtrOutput

func (PrestoJobPtrOutput) ToPrestoJobPtrOutputWithContext

func (o PrestoJobPtrOutput) ToPrestoJobPtrOutputWithContext(ctx context.Context) PrestoJobPtrOutput

type PrestoJobResponse

type PrestoJobResponse struct {
	// Optional. Presto client tags to attach to this query
	ClientTags []string `pulumi:"clientTags"`
	// Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
	ContinueOnFailure bool `pulumi:"continueOnFailure"`
	// Optional. The runtime log config for job execution.
	LoggingConfig LoggingConfigResponse `pulumi:"loggingConfig"`
	// Optional. The format in which query output will be displayed. See the Presto documentation for supported output formats
	OutputFormat string `pulumi:"outputFormat"`
	// Optional. A mapping of property names to values. Used to set Presto session properties (https://prestodb.io/docs/current/sql/set-session.html). Equivalent to using the --session flag in the Presto CLI.
	Properties map[string]string `pulumi:"properties"`
	// The HCFS URI of the script that contains SQL queries.
	QueryFileUri string `pulumi:"queryFileUri"`
	// A list of queries.
	QueryList QueryListResponse `pulumi:"queryList"`
}

A Dataproc job for running Presto (https://prestosql.io/) queries. IMPORTANT: The Dataproc Presto Optional Component (https://cloud.google.com/dataproc/docs/concepts/components/presto) must be enabled when the cluster is created to submit a Presto job to the cluster.

type PrestoJobResponseOutput

type PrestoJobResponseOutput struct{ *pulumi.OutputState }

A Dataproc job for running Presto (https://prestosql.io/) queries. IMPORTANT: The Dataproc Presto Optional Component (https://cloud.google.com/dataproc/docs/concepts/components/presto) must be enabled when the cluster is created to submit a Presto job to the cluster.

func (PrestoJobResponseOutput) ClientTags

Optional. Presto client tags to attach to this query

func (PrestoJobResponseOutput) ContinueOnFailure

func (o PrestoJobResponseOutput) ContinueOnFailure() pulumi.BoolOutput

Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.

func (PrestoJobResponseOutput) ElementType

func (PrestoJobResponseOutput) ElementType() reflect.Type

func (PrestoJobResponseOutput) LoggingConfig

Optional. The runtime log config for job execution.

func (PrestoJobResponseOutput) OutputFormat

func (o PrestoJobResponseOutput) OutputFormat() pulumi.StringOutput

Optional. The format in which query output will be displayed. See the Presto documentation for supported output formats

func (PrestoJobResponseOutput) Properties

Optional. A mapping of property names to values. Used to set Presto session properties (https://prestodb.io/docs/current/sql/set-session.html). Equivalent to using the --session flag in the Presto CLI.

func (PrestoJobResponseOutput) QueryFileUri

func (o PrestoJobResponseOutput) QueryFileUri() pulumi.StringOutput

The HCFS URI of the script that contains SQL queries.

func (PrestoJobResponseOutput) QueryList

A list of queries.

func (PrestoJobResponseOutput) ToPrestoJobResponseOutput

func (o PrestoJobResponseOutput) ToPrestoJobResponseOutput() PrestoJobResponseOutput

func (PrestoJobResponseOutput) ToPrestoJobResponseOutputWithContext

func (o PrestoJobResponseOutput) ToPrestoJobResponseOutputWithContext(ctx context.Context) PrestoJobResponseOutput

type PyPiRepositoryConfig added in v0.32.0

type PyPiRepositoryConfig struct {
	// Optional. PyPi repository address
	PypiRepository *string `pulumi:"pypiRepository"`
}

Configuration for PyPi repository

type PyPiRepositoryConfigArgs added in v0.32.0

type PyPiRepositoryConfigArgs struct {
	// Optional. PyPi repository address
	PypiRepository pulumi.StringPtrInput `pulumi:"pypiRepository"`
}

Configuration for PyPi repository

func (PyPiRepositoryConfigArgs) ElementType added in v0.32.0

func (PyPiRepositoryConfigArgs) ElementType() reflect.Type

func (PyPiRepositoryConfigArgs) ToPyPiRepositoryConfigOutput added in v0.32.0

func (i PyPiRepositoryConfigArgs) ToPyPiRepositoryConfigOutput() PyPiRepositoryConfigOutput

func (PyPiRepositoryConfigArgs) ToPyPiRepositoryConfigOutputWithContext added in v0.32.0

func (i PyPiRepositoryConfigArgs) ToPyPiRepositoryConfigOutputWithContext(ctx context.Context) PyPiRepositoryConfigOutput

func (PyPiRepositoryConfigArgs) ToPyPiRepositoryConfigPtrOutput added in v0.32.0

func (i PyPiRepositoryConfigArgs) ToPyPiRepositoryConfigPtrOutput() PyPiRepositoryConfigPtrOutput

func (PyPiRepositoryConfigArgs) ToPyPiRepositoryConfigPtrOutputWithContext added in v0.32.0

func (i PyPiRepositoryConfigArgs) ToPyPiRepositoryConfigPtrOutputWithContext(ctx context.Context) PyPiRepositoryConfigPtrOutput

type PyPiRepositoryConfigInput added in v0.32.0

type PyPiRepositoryConfigInput interface {
	pulumi.Input

	ToPyPiRepositoryConfigOutput() PyPiRepositoryConfigOutput
	ToPyPiRepositoryConfigOutputWithContext(context.Context) PyPiRepositoryConfigOutput
}

PyPiRepositoryConfigInput is an input type that accepts PyPiRepositoryConfigArgs and PyPiRepositoryConfigOutput values. You can construct a concrete instance of `PyPiRepositoryConfigInput` via:

PyPiRepositoryConfigArgs{...}
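
For illustration, a sketch pointing the runtime at a self-hosted PyPI mirror; the repository URL is a placeholder.

// Configure a custom PyPI repository for Python dependency installation.
var examplePyPiRepo PyPiRepositoryConfigInput = PyPiRepositoryConfigArgs{
	PypiRepository: pulumi.String("https://pypi.example.com/simple"), // placeholder address
}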

type PyPiRepositoryConfigOutput added in v0.32.0

type PyPiRepositoryConfigOutput struct{ *pulumi.OutputState }

Configuration for PyPi repository

func (PyPiRepositoryConfigOutput) ElementType added in v0.32.0

func (PyPiRepositoryConfigOutput) ElementType() reflect.Type

func (PyPiRepositoryConfigOutput) PypiRepository added in v0.32.0

Optional. PyPi repository address

func (PyPiRepositoryConfigOutput) ToPyPiRepositoryConfigOutput added in v0.32.0

func (o PyPiRepositoryConfigOutput) ToPyPiRepositoryConfigOutput() PyPiRepositoryConfigOutput

func (PyPiRepositoryConfigOutput) ToPyPiRepositoryConfigOutputWithContext added in v0.32.0

func (o PyPiRepositoryConfigOutput) ToPyPiRepositoryConfigOutputWithContext(ctx context.Context) PyPiRepositoryConfigOutput

func (PyPiRepositoryConfigOutput) ToPyPiRepositoryConfigPtrOutput added in v0.32.0

func (o PyPiRepositoryConfigOutput) ToPyPiRepositoryConfigPtrOutput() PyPiRepositoryConfigPtrOutput

func (PyPiRepositoryConfigOutput) ToPyPiRepositoryConfigPtrOutputWithContext added in v0.32.0

func (o PyPiRepositoryConfigOutput) ToPyPiRepositoryConfigPtrOutputWithContext(ctx context.Context) PyPiRepositoryConfigPtrOutput

type PyPiRepositoryConfigPtrInput added in v0.32.0

type PyPiRepositoryConfigPtrInput interface {
	pulumi.Input

	ToPyPiRepositoryConfigPtrOutput() PyPiRepositoryConfigPtrOutput
	ToPyPiRepositoryConfigPtrOutputWithContext(context.Context) PyPiRepositoryConfigPtrOutput
}

PyPiRepositoryConfigPtrInput is an input type that accepts PyPiRepositoryConfigArgs, PyPiRepositoryConfigPtr and PyPiRepositoryConfigPtrOutput values. You can construct a concrete instance of `PyPiRepositoryConfigPtrInput` via:

        PyPiRepositoryConfigArgs{...}

or:

        nil

func PyPiRepositoryConfigPtr added in v0.32.0

func PyPiRepositoryConfigPtr(v *PyPiRepositoryConfigArgs) PyPiRepositoryConfigPtrInput

type PyPiRepositoryConfigPtrOutput added in v0.32.0

type PyPiRepositoryConfigPtrOutput struct{ *pulumi.OutputState }

func (PyPiRepositoryConfigPtrOutput) Elem added in v0.32.0

func (PyPiRepositoryConfigPtrOutput) ElementType added in v0.32.0

func (PyPiRepositoryConfigPtrOutput) PypiRepository added in v0.32.0

Optional. PyPi repository address

func (PyPiRepositoryConfigPtrOutput) ToPyPiRepositoryConfigPtrOutput added in v0.32.0

func (o PyPiRepositoryConfigPtrOutput) ToPyPiRepositoryConfigPtrOutput() PyPiRepositoryConfigPtrOutput

func (PyPiRepositoryConfigPtrOutput) ToPyPiRepositoryConfigPtrOutputWithContext added in v0.32.0

func (o PyPiRepositoryConfigPtrOutput) ToPyPiRepositoryConfigPtrOutputWithContext(ctx context.Context) PyPiRepositoryConfigPtrOutput

type PyPiRepositoryConfigResponse added in v0.32.0

type PyPiRepositoryConfigResponse struct {
	// Optional. PyPi repository address
	PypiRepository string `pulumi:"pypiRepository"`
}

Configuration for PyPi repository

type PyPiRepositoryConfigResponseOutput added in v0.32.0

type PyPiRepositoryConfigResponseOutput struct{ *pulumi.OutputState }

Configuration for PyPi repository

func (PyPiRepositoryConfigResponseOutput) ElementType added in v0.32.0

func (PyPiRepositoryConfigResponseOutput) PypiRepository added in v0.32.0

Optional. PyPi repository address

func (PyPiRepositoryConfigResponseOutput) ToPyPiRepositoryConfigResponseOutput added in v0.32.0

func (o PyPiRepositoryConfigResponseOutput) ToPyPiRepositoryConfigResponseOutput() PyPiRepositoryConfigResponseOutput

func (PyPiRepositoryConfigResponseOutput) ToPyPiRepositoryConfigResponseOutputWithContext added in v0.32.0

func (o PyPiRepositoryConfigResponseOutput) ToPyPiRepositoryConfigResponseOutputWithContext(ctx context.Context) PyPiRepositoryConfigResponseOutput

type PySparkBatch added in v0.12.0

type PySparkBatch struct {
	// Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris []string `pulumi:"archiveUris"`
	// Optional. The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.
	Args []string `pulumi:"args"`
	// Optional. HCFS URIs of files to be placed in the working directory of each executor.
	FileUris []string `pulumi:"fileUris"`
	// Optional. HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.
	JarFileUris []string `pulumi:"jarFileUris"`
	// The HCFS URI of the main Python file to use as the Spark driver. Must be a .py file.
	MainPythonFileUri string `pulumi:"mainPythonFileUri"`
	// Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.
	PythonFileUris []string `pulumi:"pythonFileUris"`
}

A configuration for running an Apache PySpark (https://spark.apache.org/docs/latest/api/python/getting_started/quickstart.html) batch workload.

type PySparkBatchArgs added in v0.12.0

type PySparkBatchArgs struct {
	// Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris pulumi.StringArrayInput `pulumi:"archiveUris"`
	// Optional. The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.
	Args pulumi.StringArrayInput `pulumi:"args"`
	// Optional. HCFS URIs of files to be placed in the working directory of each executor.
	FileUris pulumi.StringArrayInput `pulumi:"fileUris"`
	// Optional. HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.
	JarFileUris pulumi.StringArrayInput `pulumi:"jarFileUris"`
	// The HCFS URI of the main Python file to use as the Spark driver. Must be a .py file.
	MainPythonFileUri pulumi.StringInput `pulumi:"mainPythonFileUri"`
	// Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.
	PythonFileUris pulumi.StringArrayInput `pulumi:"pythonFileUris"`
}

A configuration for running an Apache PySpark (https://spark.apache.org/docs/latest/api/python/getting_started/quickstart.html) batch workload.

func (PySparkBatchArgs) ElementType added in v0.12.0

func (PySparkBatchArgs) ElementType() reflect.Type

func (PySparkBatchArgs) ToPySparkBatchOutput added in v0.12.0

func (i PySparkBatchArgs) ToPySparkBatchOutput() PySparkBatchOutput

func (PySparkBatchArgs) ToPySparkBatchOutputWithContext added in v0.12.0

func (i PySparkBatchArgs) ToPySparkBatchOutputWithContext(ctx context.Context) PySparkBatchOutput

func (PySparkBatchArgs) ToPySparkBatchPtrOutput added in v0.12.0

func (i PySparkBatchArgs) ToPySparkBatchPtrOutput() PySparkBatchPtrOutput

func (PySparkBatchArgs) ToPySparkBatchPtrOutputWithContext added in v0.12.0

func (i PySparkBatchArgs) ToPySparkBatchPtrOutputWithContext(ctx context.Context) PySparkBatchPtrOutput

type PySparkBatchInput added in v0.12.0

type PySparkBatchInput interface {
	pulumi.Input

	ToPySparkBatchOutput() PySparkBatchOutput
	ToPySparkBatchOutputWithContext(context.Context) PySparkBatchOutput
}

PySparkBatchInput is an input type that accepts PySparkBatchArgs and PySparkBatchOutput values. You can construct a concrete instance of `PySparkBatchInput` via:

PySparkBatchArgs{...}
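
A minimal sketch of describing a PySpark batch workload; all URIs and arguments are placeholders.

// MainPythonFileUri is the only required field.
var examplePySparkBatch PySparkBatchInput = PySparkBatchArgs{
	MainPythonFileUri: pulumi.String("gs://my-bucket/main.py"),
	PythonFileUris: pulumi.StringArray{
		pulumi.String("gs://my-bucket/helpers.zip"),
	},
	// Driver arguments; avoid flags such as --conf that belong in batch properties.
	Args: pulumi.StringArray{
		pulumi.String("--date=2023-11-01"),
	},
}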

type PySparkBatchOutput added in v0.12.0

type PySparkBatchOutput struct{ *pulumi.OutputState }

A configuration for running an Apache PySpark (https://spark.apache.org/docs/latest/api/python/getting_started/quickstart.html) batch workload.

func (PySparkBatchOutput) ArchiveUris added in v0.12.0

Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.

func (PySparkBatchOutput) Args added in v0.12.0

Optional. The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.

func (PySparkBatchOutput) ElementType added in v0.12.0

func (PySparkBatchOutput) ElementType() reflect.Type

func (PySparkBatchOutput) FileUris added in v0.12.0

Optional. HCFS URIs of files to be placed in the working directory of each executor.

func (PySparkBatchOutput) JarFileUris added in v0.12.0

Optional. HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.

func (PySparkBatchOutput) MainPythonFileUri added in v0.12.0

func (o PySparkBatchOutput) MainPythonFileUri() pulumi.StringOutput

The HCFS URI of the main Python file to use as the Spark driver. Must be a .py file.
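
As a hedged illustration, any PySparkBatchInput can be lifted to a PySparkBatchOutput, whose getters return further outputs such as this one; the URI is a placeholder.

// Illustrative only: lift literal args to an output and read one field back.
func examplePySparkMainFile() pulumi.StringOutput {
	out := PySparkBatchArgs{
		MainPythonFileUri: pulumi.String("gs://my-bucket/main.py"), // placeholder URI
	}.ToPySparkBatchOutput()
	return out.MainPythonFileUri()
}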

func (PySparkBatchOutput) PythonFileUris added in v0.12.0

func (o PySparkBatchOutput) PythonFileUris() pulumi.StringArrayOutput

Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.

func (PySparkBatchOutput) ToPySparkBatchOutput added in v0.12.0

func (o PySparkBatchOutput) ToPySparkBatchOutput() PySparkBatchOutput

func (PySparkBatchOutput) ToPySparkBatchOutputWithContext added in v0.12.0

func (o PySparkBatchOutput) ToPySparkBatchOutputWithContext(ctx context.Context) PySparkBatchOutput

func (PySparkBatchOutput) ToPySparkBatchPtrOutput added in v0.12.0

func (o PySparkBatchOutput) ToPySparkBatchPtrOutput() PySparkBatchPtrOutput

func (PySparkBatchOutput) ToPySparkBatchPtrOutputWithContext added in v0.12.0

func (o PySparkBatchOutput) ToPySparkBatchPtrOutputWithContext(ctx context.Context) PySparkBatchPtrOutput

type PySparkBatchPtrInput added in v0.12.0

type PySparkBatchPtrInput interface {
	pulumi.Input

	ToPySparkBatchPtrOutput() PySparkBatchPtrOutput
	ToPySparkBatchPtrOutputWithContext(context.Context) PySparkBatchPtrOutput
}

PySparkBatchPtrInput is an input type that accepts PySparkBatchArgs, PySparkBatchPtr and PySparkBatchPtrOutput values. You can construct a concrete instance of `PySparkBatchPtrInput` via:

        PySparkBatchArgs{...}

or:

        nil

func PySparkBatchPtr added in v0.12.0

func PySparkBatchPtr(v *PySparkBatchArgs) PySparkBatchPtrInput

type PySparkBatchPtrOutput added in v0.12.0

type PySparkBatchPtrOutput struct{ *pulumi.OutputState }

func (PySparkBatchPtrOutput) ArchiveUris added in v0.12.0

Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.

func (PySparkBatchPtrOutput) Args added in v0.12.0

Optional. The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.

func (PySparkBatchPtrOutput) Elem added in v0.12.0

func (PySparkBatchPtrOutput) ElementType added in v0.12.0

func (PySparkBatchPtrOutput) ElementType() reflect.Type

func (PySparkBatchPtrOutput) FileUris added in v0.12.0

Optional. HCFS URIs of files to be placed in the working directory of each executor.

func (PySparkBatchPtrOutput) JarFileUris added in v0.12.0

Optional. HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.

func (PySparkBatchPtrOutput) MainPythonFileUri added in v0.12.0

func (o PySparkBatchPtrOutput) MainPythonFileUri() pulumi.StringPtrOutput

The HCFS URI of the main Python file to use as the Spark driver. Must be a .py file.

func (PySparkBatchPtrOutput) PythonFileUris added in v0.12.0

func (o PySparkBatchPtrOutput) PythonFileUris() pulumi.StringArrayOutput

Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.

func (PySparkBatchPtrOutput) ToPySparkBatchPtrOutput added in v0.12.0

func (o PySparkBatchPtrOutput) ToPySparkBatchPtrOutput() PySparkBatchPtrOutput

func (PySparkBatchPtrOutput) ToPySparkBatchPtrOutputWithContext added in v0.12.0

func (o PySparkBatchPtrOutput) ToPySparkBatchPtrOutputWithContext(ctx context.Context) PySparkBatchPtrOutput

type PySparkBatchResponse added in v0.12.0

type PySparkBatchResponse struct {
	// Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris []string `pulumi:"archiveUris"`
	// Optional. The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.
	Args []string `pulumi:"args"`
	// Optional. HCFS URIs of files to be placed in the working directory of each executor.
	FileUris []string `pulumi:"fileUris"`
	// Optional. HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.
	JarFileUris []string `pulumi:"jarFileUris"`
	// The HCFS URI of the main Python file to use as the Spark driver. Must be a .py file.
	MainPythonFileUri string `pulumi:"mainPythonFileUri"`
	// Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.
	PythonFileUris []string `pulumi:"pythonFileUris"`
}

A configuration for running an Apache PySpark (https://spark.apache.org/docs/latest/api/python/getting_started/quickstart.html) batch workload.

type PySparkBatchResponseOutput added in v0.12.0

type PySparkBatchResponseOutput struct{ *pulumi.OutputState }

A configuration for running an Apache PySpark (https://spark.apache.org/docs/latest/api/python/getting_started/quickstart.html) batch workload.

func (PySparkBatchResponseOutput) ArchiveUris added in v0.12.0

Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.

func (PySparkBatchResponseOutput) Args added in v0.12.0

Optional. The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.

func (PySparkBatchResponseOutput) ElementType added in v0.12.0

func (PySparkBatchResponseOutput) ElementType() reflect.Type

func (PySparkBatchResponseOutput) FileUris added in v0.12.0

Optional. HCFS URIs of files to be placed in the working directory of each executor.

func (PySparkBatchResponseOutput) JarFileUris added in v0.12.0

Optional. HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.

func (PySparkBatchResponseOutput) MainPythonFileUri added in v0.12.0

func (o PySparkBatchResponseOutput) MainPythonFileUri() pulumi.StringOutput

The HCFS URI of the main Python file to use as the Spark driver. Must be a .py file.

func (PySparkBatchResponseOutput) PythonFileUris added in v0.12.0

Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.

func (PySparkBatchResponseOutput) ToPySparkBatchResponseOutput added in v0.12.0

func (o PySparkBatchResponseOutput) ToPySparkBatchResponseOutput() PySparkBatchResponseOutput

func (PySparkBatchResponseOutput) ToPySparkBatchResponseOutputWithContext added in v0.12.0

func (o PySparkBatchResponseOutput) ToPySparkBatchResponseOutputWithContext(ctx context.Context) PySparkBatchResponseOutput

type PySparkJob

type PySparkJob struct {
	// Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris []string `pulumi:"archiveUris"`
	// Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
	Args []string `pulumi:"args"`
	// Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
	FileUris []string `pulumi:"fileUris"`
	// Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.
	JarFileUris []string `pulumi:"jarFileUris"`
	// Optional. The runtime log config for job execution.
	LoggingConfig *LoggingConfig `pulumi:"loggingConfig"`
	// The HCFS URI of the main Python file to use as the driver. Must be a .py file.
	MainPythonFileUri string `pulumi:"mainPythonFileUri"`
	// Optional. A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.
	Properties map[string]string `pulumi:"properties"`
	// Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.
	PythonFileUris []string `pulumi:"pythonFileUris"`
}

A Dataproc job for running Apache PySpark (https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN.

type PySparkJobArgs

type PySparkJobArgs struct {
	// Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris pulumi.StringArrayInput `pulumi:"archiveUris"`
	// Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
	Args pulumi.StringArrayInput `pulumi:"args"`
	// Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
	FileUris pulumi.StringArrayInput `pulumi:"fileUris"`
	// Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.
	JarFileUris pulumi.StringArrayInput `pulumi:"jarFileUris"`
	// Optional. The runtime log config for job execution.
	LoggingConfig LoggingConfigPtrInput `pulumi:"loggingConfig"`
	// The HCFS URI of the main Python file to use as the driver. Must be a .py file.
	MainPythonFileUri pulumi.StringInput `pulumi:"mainPythonFileUri"`
	// Optional. A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.
	Properties pulumi.StringMapInput `pulumi:"properties"`
	// Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.
	PythonFileUris pulumi.StringArrayInput `pulumi:"pythonFileUris"`
}

A Dataproc job for running Apache PySpark (https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN.

func (PySparkJobArgs) ElementType

func (PySparkJobArgs) ElementType() reflect.Type

func (PySparkJobArgs) ToPySparkJobOutput

func (i PySparkJobArgs) ToPySparkJobOutput() PySparkJobOutput

func (PySparkJobArgs) ToPySparkJobOutputWithContext

func (i PySparkJobArgs) ToPySparkJobOutputWithContext(ctx context.Context) PySparkJobOutput

func (PySparkJobArgs) ToPySparkJobPtrOutput

func (i PySparkJobArgs) ToPySparkJobPtrOutput() PySparkJobPtrOutput

func (PySparkJobArgs) ToPySparkJobPtrOutputWithContext

func (i PySparkJobArgs) ToPySparkJobPtrOutputWithContext(ctx context.Context) PySparkJobPtrOutput

type PySparkJobInput

type PySparkJobInput interface {
	pulumi.Input

	ToPySparkJobOutput() PySparkJobOutput
	ToPySparkJobOutputWithContext(context.Context) PySparkJobOutput
}

PySparkJobInput is an input type that accepts PySparkJobArgs and PySparkJobOutput values. You can construct a concrete instance of `PySparkJobInput` via:

PySparkJobArgs{...}
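
A minimal sketch for the YARN job variant; the URIs and the Spark property value are placeholders.

// Construct a PySparkJobInput; PySparkJobArgs satisfies the interface directly.
var examplePySparkJob PySparkJobInput = PySparkJobArgs{
	MainPythonFileUri: pulumi.String("gs://my-bucket/main.py"),
	JarFileUris: pulumi.StringArray{
		pulumi.String("gs://my-bucket/libs/connector.jar"),
	},
	// Spark properties; values that conflict with Dataproc API settings may be overwritten.
	Properties: pulumi.StringMap{
		"spark.executor.memory": pulumi.String("4g"),
	},
}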

type PySparkJobOutput

type PySparkJobOutput struct{ *pulumi.OutputState }

A Dataproc job for running Apache PySpark (https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN.

func (PySparkJobOutput) ArchiveUris

func (o PySparkJobOutput) ArchiveUris() pulumi.StringArrayOutput

Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.

func (PySparkJobOutput) Args

Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.

func (PySparkJobOutput) ElementType

func (PySparkJobOutput) ElementType() reflect.Type

func (PySparkJobOutput) FileUris

Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.

func (PySparkJobOutput) JarFileUris

func (o PySparkJobOutput) JarFileUris() pulumi.StringArrayOutput

Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.

func (PySparkJobOutput) LoggingConfig

func (o PySparkJobOutput) LoggingConfig() LoggingConfigPtrOutput

Optional. The runtime log config for job execution.

func (PySparkJobOutput) MainPythonFileUri

func (o PySparkJobOutput) MainPythonFileUri() pulumi.StringOutput

The HCFS URI of the main Python file to use as the driver. Must be a .py file.

func (PySparkJobOutput) Properties

func (o PySparkJobOutput) Properties() pulumi.StringMapOutput

Optional. A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.

func (PySparkJobOutput) PythonFileUris

func (o PySparkJobOutput) PythonFileUris() pulumi.StringArrayOutput

Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.

func (PySparkJobOutput) ToPySparkJobOutput

func (o PySparkJobOutput) ToPySparkJobOutput() PySparkJobOutput

func (PySparkJobOutput) ToPySparkJobOutputWithContext

func (o PySparkJobOutput) ToPySparkJobOutputWithContext(ctx context.Context) PySparkJobOutput

func (PySparkJobOutput) ToPySparkJobPtrOutput

func (o PySparkJobOutput) ToPySparkJobPtrOutput() PySparkJobPtrOutput

func (PySparkJobOutput) ToPySparkJobPtrOutputWithContext

func (o PySparkJobOutput) ToPySparkJobPtrOutputWithContext(ctx context.Context) PySparkJobPtrOutput

type PySparkJobPtrInput

type PySparkJobPtrInput interface {
	pulumi.Input

	ToPySparkJobPtrOutput() PySparkJobPtrOutput
	ToPySparkJobPtrOutputWithContext(context.Context) PySparkJobPtrOutput
}

PySparkJobPtrInput is an input type that accepts PySparkJobArgs, PySparkJobPtr and PySparkJobPtrOutput values. You can construct a concrete instance of `PySparkJobPtrInput` via:

        PySparkJobArgs{...}

or:

        nil

func PySparkJobPtr

func PySparkJobPtr(v *PySparkJobArgs) PySparkJobPtrInput

type PySparkJobPtrOutput

type PySparkJobPtrOutput struct{ *pulumi.OutputState }

func (PySparkJobPtrOutput) ArchiveUris

Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.

func (PySparkJobPtrOutput) Args

Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.

func (PySparkJobPtrOutput) Elem

func (PySparkJobPtrOutput) ElementType

func (PySparkJobPtrOutput) ElementType() reflect.Type

func (PySparkJobPtrOutput) FileUris

Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.

func (PySparkJobPtrOutput) JarFileUris

Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.

func (PySparkJobPtrOutput) LoggingConfig

func (o PySparkJobPtrOutput) LoggingConfig() LoggingConfigPtrOutput

Optional. The runtime log config for job execution.

func (PySparkJobPtrOutput) MainPythonFileUri

func (o PySparkJobPtrOutput) MainPythonFileUri() pulumi.StringPtrOutput

The HCFS URI of the main Python file to use as the driver. Must be a .py file.

func (PySparkJobPtrOutput) Properties

Optional. A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.

func (PySparkJobPtrOutput) PythonFileUris

func (o PySparkJobPtrOutput) PythonFileUris() pulumi.StringArrayOutput

Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.

func (PySparkJobPtrOutput) ToPySparkJobPtrOutput

func (o PySparkJobPtrOutput) ToPySparkJobPtrOutput() PySparkJobPtrOutput

func (PySparkJobPtrOutput) ToPySparkJobPtrOutputWithContext

func (o PySparkJobPtrOutput) ToPySparkJobPtrOutputWithContext(ctx context.Context) PySparkJobPtrOutput

type PySparkJobResponse

type PySparkJobResponse struct {
	// Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris []string `pulumi:"archiveUris"`
	// Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
	Args []string `pulumi:"args"`
	// Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
	FileUris []string `pulumi:"fileUris"`
	// Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.
	JarFileUris []string `pulumi:"jarFileUris"`
	// Optional. The runtime log config for job execution.
	LoggingConfig LoggingConfigResponse `pulumi:"loggingConfig"`
	// The HCFS URI of the main Python file to use as the driver. Must be a .py file.
	MainPythonFileUri string `pulumi:"mainPythonFileUri"`
	// Optional. A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.
	Properties map[string]string `pulumi:"properties"`
	// Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.
	PythonFileUris []string `pulumi:"pythonFileUris"`
}

A Dataproc job for running Apache PySpark (https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN.

type PySparkJobResponseOutput

type PySparkJobResponseOutput struct{ *pulumi.OutputState }

A Dataproc job for running Apache PySpark (https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN.

func (PySparkJobResponseOutput) ArchiveUris

Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.

func (PySparkJobResponseOutput) Args

Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.

func (PySparkJobResponseOutput) ElementType

func (PySparkJobResponseOutput) ElementType() reflect.Type

func (PySparkJobResponseOutput) FileUris

Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.

func (PySparkJobResponseOutput) JarFileUris

Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.

func (PySparkJobResponseOutput) LoggingConfig

Optional. The runtime log config for job execution.

func (PySparkJobResponseOutput) MainPythonFileUri

func (o PySparkJobResponseOutput) MainPythonFileUri() pulumi.StringOutput

The HCFS URI of the main Python file to use as the driver. Must be a .py file.

func (PySparkJobResponseOutput) Properties

Optional. A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.

func (PySparkJobResponseOutput) PythonFileUris

Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.

func (PySparkJobResponseOutput) ToPySparkJobResponseOutput

func (o PySparkJobResponseOutput) ToPySparkJobResponseOutput() PySparkJobResponseOutput

func (PySparkJobResponseOutput) ToPySparkJobResponseOutputWithContext

func (o PySparkJobResponseOutput) ToPySparkJobResponseOutputWithContext(ctx context.Context) PySparkJobResponseOutput

type QueryList

type QueryList struct {
	// The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } }
	Queries []string `pulumi:"queries"`
}

A list of queries to run on a cluster.

type QueryListArgs

type QueryListArgs struct {
	// The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } }
	Queries pulumi.StringArrayInput `pulumi:"queries"`
}

A list of queries to run on a cluster.

func (QueryListArgs) ElementType

func (QueryListArgs) ElementType() reflect.Type

func (QueryListArgs) ToQueryListOutput

func (i QueryListArgs) ToQueryListOutput() QueryListOutput

func (QueryListArgs) ToQueryListOutputWithContext

func (i QueryListArgs) ToQueryListOutputWithContext(ctx context.Context) QueryListOutput

func (QueryListArgs) ToQueryListPtrOutput

func (i QueryListArgs) ToQueryListPtrOutput() QueryListPtrOutput

func (QueryListArgs) ToQueryListPtrOutputWithContext

func (i QueryListArgs) ToQueryListPtrOutputWithContext(ctx context.Context) QueryListPtrOutput

type QueryListInput

type QueryListInput interface {
	pulumi.Input

	ToQueryListOutput() QueryListOutput
	ToQueryListOutputWithContext(context.Context) QueryListOutput
}

QueryListInput is an input type that accepts QueryListArgs and QueryListOutput values. You can construct a concrete instance of `QueryListInput` via:

QueryListArgs{...}
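
A minimal sketch mirroring the API snippet quoted above: trailing semicolons are optional, and a single string may carry several queries separated by semicolons.

// Three entries, the last of which bundles two queries into one string.
var exampleQueries QueryListInput = QueryListArgs{
	Queries: pulumi.StringArray{
		pulumi.String("query1"),
		pulumi.String("query2"),
		pulumi.String("query3;query4"),
	},
}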

type QueryListOutput

type QueryListOutput struct{ *pulumi.OutputState }

A list of queries to run on a cluster.

func (QueryListOutput) ElementType

func (QueryListOutput) ElementType() reflect.Type

func (QueryListOutput) Queries

The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } }

func (QueryListOutput) ToQueryListOutput

func (o QueryListOutput) ToQueryListOutput() QueryListOutput

func (QueryListOutput) ToQueryListOutputWithContext

func (o QueryListOutput) ToQueryListOutputWithContext(ctx context.Context) QueryListOutput

func (QueryListOutput) ToQueryListPtrOutput

func (o QueryListOutput) ToQueryListPtrOutput() QueryListPtrOutput

func (QueryListOutput) ToQueryListPtrOutputWithContext

func (o QueryListOutput) ToQueryListPtrOutputWithContext(ctx context.Context) QueryListPtrOutput

type QueryListPtrInput

type QueryListPtrInput interface {
	pulumi.Input

	ToQueryListPtrOutput() QueryListPtrOutput
	ToQueryListPtrOutputWithContext(context.Context) QueryListPtrOutput
}

QueryListPtrInput is an input type that accepts QueryListArgs, QueryListPtr and QueryListPtrOutput values. You can construct a concrete instance of `QueryListPtrInput` via:

        QueryListArgs{...}

or:

        nil

func QueryListPtr

func QueryListPtr(v *QueryListArgs) QueryListPtrInput

type QueryListPtrOutput

type QueryListPtrOutput struct{ *pulumi.OutputState }

func (QueryListPtrOutput) Elem

func (QueryListPtrOutput) ElementType

func (QueryListPtrOutput) ElementType() reflect.Type

func (QueryListPtrOutput) Queries

The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } }

func (QueryListPtrOutput) ToQueryListPtrOutput

func (o QueryListPtrOutput) ToQueryListPtrOutput() QueryListPtrOutput

func (QueryListPtrOutput) ToQueryListPtrOutputWithContext

func (o QueryListPtrOutput) ToQueryListPtrOutputWithContext(ctx context.Context) QueryListPtrOutput

type QueryListResponse

type QueryListResponse struct {
	// The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } }
	Queries []string `pulumi:"queries"`
}

A list of queries to run on a cluster.

type QueryListResponseOutput

type QueryListResponseOutput struct{ *pulumi.OutputState }

A list of queries to run on a cluster.

func (QueryListResponseOutput) ElementType

func (QueryListResponseOutput) ElementType() reflect.Type

func (QueryListResponseOutput) Queries

The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } }
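
As a hedged sketch (assuming this getter returns pulumi.StringArrayOutput, consistent with the []string field above), the usual ApplyT pattern reads the resolved slice:

// Illustrative helper: count the queries carried by a response output.
func countQueries(q QueryListResponseOutput) pulumi.IntOutput {
	return q.Queries().ApplyT(func(qs []string) int {
		return len(qs)
	}).(pulumi.IntOutput)
}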

func (QueryListResponseOutput) ToQueryListResponseOutput

func (o QueryListResponseOutput) ToQueryListResponseOutput() QueryListResponseOutput

func (QueryListResponseOutput) ToQueryListResponseOutputWithContext

func (o QueryListResponseOutput) ToQueryListResponseOutputWithContext(ctx context.Context) QueryListResponseOutput

type RegexValidation

type RegexValidation struct {
	// RE2 regular expressions used to validate the parameter's value. The value must match the regex in its entirety (substring matches are not sufficient).
	Regexes []string `pulumi:"regexes"`
}

Validation based on regular expressions.

type RegexValidationArgs

type RegexValidationArgs struct {
	// RE2 regular expressions used to validate the parameter's value. The value must match the regex in its entirety (substring matches are not sufficient).
	Regexes pulumi.StringArrayInput `pulumi:"regexes"`
}

Validation based on regular expressions.

func (RegexValidationArgs) ElementType

func (RegexValidationArgs) ElementType() reflect.Type

func (RegexValidationArgs) ToRegexValidationOutput

func (i RegexValidationArgs) ToRegexValidationOutput() RegexValidationOutput

func (RegexValidationArgs) ToRegexValidationOutputWithContext

func (i RegexValidationArgs) ToRegexValidationOutputWithContext(ctx context.Context) RegexValidationOutput

func (RegexValidationArgs) ToRegexValidationPtrOutput

func (i RegexValidationArgs) ToRegexValidationPtrOutput() RegexValidationPtrOutput

func (RegexValidationArgs) ToRegexValidationPtrOutputWithContext

func (i RegexValidationArgs) ToRegexValidationPtrOutputWithContext(ctx context.Context) RegexValidationPtrOutput

type RegexValidationInput

type RegexValidationInput interface {
	pulumi.Input

	ToRegexValidationOutput() RegexValidationOutput
	ToRegexValidationOutputWithContext(context.Context) RegexValidationOutput
}

RegexValidationInput is an input type that accepts RegexValidationArgs and RegexValidationOutput values. You can construct a concrete instance of `RegexValidationInput` via:

        RegexValidationArgs{...}
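For illustration only (import paths, the helper name, and the sample pattern are assumptions), a literal RegexValidationArgs value can be used anywhere a RegexValidationInput is accepted, for example to constrain a template parameter:

package example

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// zoneRegexValidation accepts only us-central1 zone names. The value must
// match the RE2 pattern in its entirety; substring matches are rejected.
func zoneRegexValidation() dataproc.RegexValidationInput {
	return dataproc.RegexValidationArgs{
		Regexes: pulumi.StringArray{
			pulumi.String("us-central1-[a-f]"),
		},
	}
}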

type RegexValidationOutput

type RegexValidationOutput struct{ *pulumi.OutputState }

Validation based on regular expressions.

func (RegexValidationOutput) ElementType

func (RegexValidationOutput) ElementType() reflect.Type

func (RegexValidationOutput) Regexes

RE2 regular expressions used to validate the parameter's value. The value must match the regex in its entirety (substring matches are not sufficient).

func (RegexValidationOutput) ToRegexValidationOutput

func (o RegexValidationOutput) ToRegexValidationOutput() RegexValidationOutput

func (RegexValidationOutput) ToRegexValidationOutputWithContext

func (o RegexValidationOutput) ToRegexValidationOutputWithContext(ctx context.Context) RegexValidationOutput

func (RegexValidationOutput) ToRegexValidationPtrOutput

func (o RegexValidationOutput) ToRegexValidationPtrOutput() RegexValidationPtrOutput

func (RegexValidationOutput) ToRegexValidationPtrOutputWithContext

func (o RegexValidationOutput) ToRegexValidationPtrOutputWithContext(ctx context.Context) RegexValidationPtrOutput

type RegexValidationPtrInput

type RegexValidationPtrInput interface {
	pulumi.Input

	ToRegexValidationPtrOutput() RegexValidationPtrOutput
	ToRegexValidationPtrOutputWithContext(context.Context) RegexValidationPtrOutput
}

RegexValidationPtrInput is an input type that accepts RegexValidationArgs, RegexValidationPtr and RegexValidationPtrOutput values. You can construct a concrete instance of `RegexValidationPtrInput` via:

        RegexValidationArgs{...}

or:

        nil

type RegexValidationPtrOutput

type RegexValidationPtrOutput struct{ *pulumi.OutputState }

func (RegexValidationPtrOutput) Elem

func (RegexValidationPtrOutput) ElementType

func (RegexValidationPtrOutput) ElementType() reflect.Type

func (RegexValidationPtrOutput) Regexes

RE2 regular expressions used to validate the parameter's value. The value must match the regex in its entirety (substring matches are not sufficient).

func (RegexValidationPtrOutput) ToRegexValidationPtrOutput

func (o RegexValidationPtrOutput) ToRegexValidationPtrOutput() RegexValidationPtrOutput

func (RegexValidationPtrOutput) ToRegexValidationPtrOutputWithContext

func (o RegexValidationPtrOutput) ToRegexValidationPtrOutputWithContext(ctx context.Context) RegexValidationPtrOutput

type RegexValidationResponse

type RegexValidationResponse struct {
	// RE2 regular expressions used to validate the parameter's value. The value must match the regex in its entirety (substring matches are not sufficient).
	Regexes []string `pulumi:"regexes"`
}

Validation based on regular expressions.

type RegexValidationResponseOutput

type RegexValidationResponseOutput struct{ *pulumi.OutputState }

Validation based on regular expressions.

func (RegexValidationResponseOutput) ElementType

func (RegexValidationResponseOutput) Regexes

RE2 regular expressions used to validate the parameter's value. The value must match the regex in its entirety (substring matches are not sufficient).

func (RegexValidationResponseOutput) ToRegexValidationResponseOutput

func (o RegexValidationResponseOutput) ToRegexValidationResponseOutput() RegexValidationResponseOutput

func (RegexValidationResponseOutput) ToRegexValidationResponseOutputWithContext

func (o RegexValidationResponseOutput) ToRegexValidationResponseOutputWithContext(ctx context.Context) RegexValidationResponseOutput

type RegionAutoscalingPolicyIamBinding added in v0.26.0

type RegionAutoscalingPolicyIamBinding struct {
	pulumi.CustomResourceState

	// An IAM Condition for a given binding. See https://cloud.google.com/iam/docs/conditions-overview for additional details.
	Condition iam.ConditionPtrOutput `pulumi:"condition"`
	// The etag of the resource's IAM policy.
	Etag pulumi.StringOutput `pulumi:"etag"`
	// Specifies the principals requesting access for a Google Cloud resource. members can have the following values: allUsers: A special identifier that represents anyone who is on the internet; with or without a Google account. allAuthenticatedUsers: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. user:{emailid}: An email address that represents a specific Google account. For example, alice@example.com . serviceAccount:{emailid}: An email address that represents a Google service account. For example, my-other-app@appspot.gserviceaccount.com. serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]: An identifier for a Kubernetes service account (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, my-project.svc.id.goog[my-namespace/my-kubernetes-sa]. group:{emailid}: An email address that represents a Google group. For example, admins@example.com. domain:{domain}: The G Suite domain (primary) that represents all the users of that domain. For example, google.com or example.com. deleted:user:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a user that has been recently deleted. For example, alice@example.com?uid=123456789012345678901. If the user is recovered, this value reverts to user:{emailid} and the recovered user retains the role in the binding. deleted:serviceAccount:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901. If the service account is undeleted, this value reverts to serviceAccount:{emailid} and the undeleted service account retains the role in the binding. deleted:group:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, admins@example.com?uid=123456789012345678901. If the group is recovered, this value reverts to group:{emailid} and the recovered group retains the role in the binding.
	Members pulumi.StringArrayOutput `pulumi:"members"`
	// The name of the resource to manage IAM policies for.
	Name pulumi.StringOutput `pulumi:"name"`
	// The project in which the resource belongs. If it is not provided, a default will be supplied.
	Project pulumi.StringOutput `pulumi:"project"`
	// Role that is assigned to the list of members, or principals. For example, roles/viewer, roles/editor, or roles/owner.
	Role pulumi.StringOutput `pulumi:"role"`
}

Sets the access control policy on the specified resource. Replaces any existing policy. Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.

func GetRegionAutoscalingPolicyIamBinding added in v0.26.0

func GetRegionAutoscalingPolicyIamBinding(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *RegionAutoscalingPolicyIamBindingState, opts ...pulumi.ResourceOption) (*RegionAutoscalingPolicyIamBinding, error)

GetRegionAutoscalingPolicyIamBinding gets an existing RegionAutoscalingPolicyIamBinding resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewRegionAutoscalingPolicyIamBinding added in v0.26.0

func NewRegionAutoscalingPolicyIamBinding(ctx *pulumi.Context,
	name string, args *RegionAutoscalingPolicyIamBindingArgs, opts ...pulumi.ResourceOption) (*RegionAutoscalingPolicyIamBinding, error)

NewRegionAutoscalingPolicyIamBinding registers a new resource with the given unique name, arguments, and options.

func (*RegionAutoscalingPolicyIamBinding) ElementType added in v0.26.0

func (*RegionAutoscalingPolicyIamBinding) ToRegionAutoscalingPolicyIamBindingOutput added in v0.26.0

func (i *RegionAutoscalingPolicyIamBinding) ToRegionAutoscalingPolicyIamBindingOutput() RegionAutoscalingPolicyIamBindingOutput

func (*RegionAutoscalingPolicyIamBinding) ToRegionAutoscalingPolicyIamBindingOutputWithContext added in v0.26.0

func (i *RegionAutoscalingPolicyIamBinding) ToRegionAutoscalingPolicyIamBindingOutputWithContext(ctx context.Context) RegionAutoscalingPolicyIamBindingOutput

type RegionAutoscalingPolicyIamBindingArgs added in v0.26.0

type RegionAutoscalingPolicyIamBindingArgs struct {
	// An IAM Condition for a given binding.
	Condition iam.ConditionPtrInput
	// Identities that will be granted the privilege in role. Each entry can have one of the following values:
	//
	//  * user:{emailid}: An email address that represents a specific Google account. For example, alice@gmail.com or joe@example.com.
	//  * serviceAccount:{emailid}: An email address that represents a service account. For example, my-other-app@appspot.gserviceaccount.com.
	//  * group:{emailid}: An email address that represents a Google group. For example, admins@example.com.
	//  * domain:{domain}: A G Suite domain (primary, instead of alias) name that represents all the users of that domain. For example, google.com or example.com.
	Members pulumi.StringArrayInput
	// The name of the resource to manage IAM policies for.
	Name pulumi.StringInput
	// The role that should be applied. Only one `IamBinding` can be used per role.
	Role pulumi.StringInput
}

The set of arguments for constructing a RegionAutoscalingPolicyIamBinding resource.
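A minimal usage sketch follows; the import paths, the project and policy names, and the full resource-path format passed to Name are assumptions, not values from the generated docs:

package main

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Bind roles/viewer on a regional autoscaling policy to a group.
		_, err := dataproc.NewRegionAutoscalingPolicyIamBinding(ctx, "viewers", &dataproc.RegionAutoscalingPolicyIamBindingArgs{
			Name:    pulumi.String("projects/my-project/regions/us-central1/autoscalingPolicies/my-policy"),
			Role:    pulumi.String("roles/viewer"),
			Members: pulumi.StringArray{pulumi.String("group:admins@example.com")},
		})
		return err
	})
}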

func (RegionAutoscalingPolicyIamBindingArgs) ElementType added in v0.26.0

type RegionAutoscalingPolicyIamBindingInput added in v0.26.0

type RegionAutoscalingPolicyIamBindingInput interface {
	pulumi.Input

	ToRegionAutoscalingPolicyIamBindingOutput() RegionAutoscalingPolicyIamBindingOutput
	ToRegionAutoscalingPolicyIamBindingOutputWithContext(ctx context.Context) RegionAutoscalingPolicyIamBindingOutput
}

type RegionAutoscalingPolicyIamBindingOutput added in v0.26.0

type RegionAutoscalingPolicyIamBindingOutput struct{ *pulumi.OutputState }

func (RegionAutoscalingPolicyIamBindingOutput) Condition added in v0.26.0

An IAM Condition for a given binding. See https://cloud.google.com/iam/docs/conditions-overview for additional details.

func (RegionAutoscalingPolicyIamBindingOutput) ElementType added in v0.26.0

func (RegionAutoscalingPolicyIamBindingOutput) Etag added in v0.26.0

The etag of the resource's IAM policy.

func (RegionAutoscalingPolicyIamBindingOutput) Members added in v0.26.0

Specifies the principals requesting access for a Google Cloud resource. members can have the following values: allUsers: A special identifier that represents anyone who is on the internet; with or without a Google account. allAuthenticatedUsers: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. user:{emailid}: An email address that represents a specific Google account. For example, alice@example.com . serviceAccount:{emailid}: An email address that represents a Google service account. For example, my-other-app@appspot.gserviceaccount.com. serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]: An identifier for a Kubernetes service account (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, my-project.svc.id.goog[my-namespace/my-kubernetes-sa]. group:{emailid}: An email address that represents a Google group. For example, admins@example.com. domain:{domain}: The G Suite domain (primary) that represents all the users of that domain. For example, google.com or example.com. deleted:user:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a user that has been recently deleted. For example, alice@example.com?uid=123456789012345678901. If the user is recovered, this value reverts to user:{emailid} and the recovered user retains the role in the binding. deleted:serviceAccount:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901. If the service account is undeleted, this value reverts to serviceAccount:{emailid} and the undeleted service account retains the role in the binding. deleted:group:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, admins@example.com?uid=123456789012345678901. If the group is recovered, this value reverts to group:{emailid} and the recovered group retains the role in the binding.

func (RegionAutoscalingPolicyIamBindingOutput) Name added in v0.26.0

The name of the resource to manage IAM policies for.

func (RegionAutoscalingPolicyIamBindingOutput) Project added in v0.26.0

The project in which the resource belongs. If it is not provided, a default will be supplied.

func (RegionAutoscalingPolicyIamBindingOutput) Role added in v0.26.0

Role that is assigned to the list of members, or principals. For example, roles/viewer, roles/editor, or roles/owner.

func (RegionAutoscalingPolicyIamBindingOutput) ToRegionAutoscalingPolicyIamBindingOutput added in v0.26.0

func (o RegionAutoscalingPolicyIamBindingOutput) ToRegionAutoscalingPolicyIamBindingOutput() RegionAutoscalingPolicyIamBindingOutput

func (RegionAutoscalingPolicyIamBindingOutput) ToRegionAutoscalingPolicyIamBindingOutputWithContext added in v0.26.0

func (o RegionAutoscalingPolicyIamBindingOutput) ToRegionAutoscalingPolicyIamBindingOutputWithContext(ctx context.Context) RegionAutoscalingPolicyIamBindingOutput

type RegionAutoscalingPolicyIamBindingState added in v0.26.0

type RegionAutoscalingPolicyIamBindingState struct {
}

func (RegionAutoscalingPolicyIamBindingState) ElementType added in v0.26.0

type RegionAutoscalingPolicyIamMember added in v0.26.0

type RegionAutoscalingPolicyIamMember struct {
	pulumi.CustomResourceState

	// An IAM Condition for a given binding. See https://cloud.google.com/iam/docs/conditions-overview for additional details.
	Condition iam.ConditionPtrOutput `pulumi:"condition"`
	// The etag of the resource's IAM policy.
	Etag pulumi.StringOutput `pulumi:"etag"`
	// Specifies the principals requesting access for a Google Cloud resource. members can have the following values: allUsers: A special identifier that represents anyone who is on the internet; with or without a Google account. allAuthenticatedUsers: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. user:{emailid}: An email address that represents a specific Google account. For example, alice@example.com . serviceAccount:{emailid}: An email address that represents a Google service account. For example, my-other-app@appspot.gserviceaccount.com. serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]: An identifier for a Kubernetes service account (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, my-project.svc.id.goog[my-namespace/my-kubernetes-sa]. group:{emailid}: An email address that represents a Google group. For example, admins@example.com. domain:{domain}: The G Suite domain (primary) that represents all the users of that domain. For example, google.com or example.com. deleted:user:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a user that has been recently deleted. For example, alice@example.com?uid=123456789012345678901. If the user is recovered, this value reverts to user:{emailid} and the recovered user retains the role in the binding. deleted:serviceAccount:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901. If the service account is undeleted, this value reverts to serviceAccount:{emailid} and the undeleted service account retains the role in the binding. deleted:group:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, admins@example.com?uid=123456789012345678901. If the group is recovered, this value reverts to group:{emailid} and the recovered group retains the role in the binding.
	Member pulumi.StringOutput `pulumi:"member"`
	// The name of the resource to manage IAM policies for.
	Name pulumi.StringOutput `pulumi:"name"`
	// The project in which the resource belongs. If it is not provided, a default will be supplied.
	Project pulumi.StringOutput `pulumi:"project"`
	// Role that is assigned to the list of members, or principals. For example, roles/viewer, roles/editor, or roles/owner.
	Role pulumi.StringOutput `pulumi:"role"`
}

Sets the access control policy on the specified resource. Replaces any existing policy. Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.

func GetRegionAutoscalingPolicyIamMember added in v0.26.0

func GetRegionAutoscalingPolicyIamMember(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *RegionAutoscalingPolicyIamMemberState, opts ...pulumi.ResourceOption) (*RegionAutoscalingPolicyIamMember, error)

GetRegionAutoscalingPolicyIamMember gets an existing RegionAutoscalingPolicyIamMember resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewRegionAutoscalingPolicyIamMember added in v0.26.0

func NewRegionAutoscalingPolicyIamMember(ctx *pulumi.Context,
	name string, args *RegionAutoscalingPolicyIamMemberArgs, opts ...pulumi.ResourceOption) (*RegionAutoscalingPolicyIamMember, error)

NewRegionAutoscalingPolicyIamMember registers a new resource with the given unique name, arguments, and options.

func (*RegionAutoscalingPolicyIamMember) ElementType added in v0.26.0

func (*RegionAutoscalingPolicyIamMember) ToRegionAutoscalingPolicyIamMemberOutput added in v0.26.0

func (i *RegionAutoscalingPolicyIamMember) ToRegionAutoscalingPolicyIamMemberOutput() RegionAutoscalingPolicyIamMemberOutput

func (*RegionAutoscalingPolicyIamMember) ToRegionAutoscalingPolicyIamMemberOutputWithContext added in v0.26.0

func (i *RegionAutoscalingPolicyIamMember) ToRegionAutoscalingPolicyIamMemberOutputWithContext(ctx context.Context) RegionAutoscalingPolicyIamMemberOutput

type RegionAutoscalingPolicyIamMemberArgs added in v0.26.0

type RegionAutoscalingPolicyIamMemberArgs struct {
	// An IAM Condition for a given binding.
	Condition iam.ConditionPtrInput
	// Identity that will be granted the privilege in role. The entry can have one of the following values:
	//
	//  * user:{emailid}: An email address that represents a specific Google account. For example, alice@gmail.com or joe@example.com.
	//  * serviceAccount:{emailid}: An email address that represents a service account. For example, my-other-app@appspot.gserviceaccount.com.
	//  * group:{emailid}: An email address that represents a Google group. For example, admins@example.com.
	//  * domain:{domain}: A G Suite domain (primary, instead of alias) name that represents all the users of that domain. For example, google.com or example.com.
	Member pulumi.StringInput
	// The name of the resource to manage IAM policies for.
	Name pulumi.StringInput
	// The role that should be applied.
	Role pulumi.StringInput
}

The set of arguments for constructing a RegionAutoscalingPolicyIamMember resource.

func (RegionAutoscalingPolicyIamMemberArgs) ElementType added in v0.26.0

type RegionAutoscalingPolicyIamMemberInput added in v0.26.0

type RegionAutoscalingPolicyIamMemberInput interface {
	pulumi.Input

	ToRegionAutoscalingPolicyIamMemberOutput() RegionAutoscalingPolicyIamMemberOutput
	ToRegionAutoscalingPolicyIamMemberOutputWithContext(ctx context.Context) RegionAutoscalingPolicyIamMemberOutput
}

type RegionAutoscalingPolicyIamMemberOutput added in v0.26.0

type RegionAutoscalingPolicyIamMemberOutput struct{ *pulumi.OutputState }

func (RegionAutoscalingPolicyIamMemberOutput) Condition added in v0.26.0

An IAM Condition for a given binding. See https://cloud.google.com/iam/docs/conditions-overview for additional details.

func (RegionAutoscalingPolicyIamMemberOutput) ElementType added in v0.26.0

func (RegionAutoscalingPolicyIamMemberOutput) Etag added in v0.26.0

The etag of the resource's IAM policy.

func (RegionAutoscalingPolicyIamMemberOutput) Member added in v0.26.0

Specifies the principals requesting access for a Google Cloud resource. members can have the following values: allUsers: A special identifier that represents anyone who is on the internet; with or without a Google account. allAuthenticatedUsers: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. user:{emailid}: An email address that represents a specific Google account. For example, alice@example.com . serviceAccount:{emailid}: An email address that represents a Google service account. For example, my-other-app@appspot.gserviceaccount.com. serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]: An identifier for a Kubernetes service account (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, my-project.svc.id.goog[my-namespace/my-kubernetes-sa]. group:{emailid}: An email address that represents a Google group. For example, admins@example.com. domain:{domain}: The G Suite domain (primary) that represents all the users of that domain. For example, google.com or example.com. deleted:user:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a user that has been recently deleted. For example, alice@example.com?uid=123456789012345678901. If the user is recovered, this value reverts to user:{emailid} and the recovered user retains the role in the binding. deleted:serviceAccount:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901. If the service account is undeleted, this value reverts to serviceAccount:{emailid} and the undeleted service account retains the role in the binding. deleted:group:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, admins@example.com?uid=123456789012345678901. If the group is recovered, this value reverts to group:{emailid} and the recovered group retains the role in the binding.

func (RegionAutoscalingPolicyIamMemberOutput) Name added in v0.26.0

The name of the resource to manage IAM policies for.

func (RegionAutoscalingPolicyIamMemberOutput) Project added in v0.26.0

The project in which the resource belongs. If it is not provided, a default will be supplied.

func (RegionAutoscalingPolicyIamMemberOutput) Role added in v0.26.0

Role that is assigned to the list of members, or principals. For example, roles/viewer, roles/editor, or roles/owner.

func (RegionAutoscalingPolicyIamMemberOutput) ToRegionAutoscalingPolicyIamMemberOutput added in v0.26.0

func (o RegionAutoscalingPolicyIamMemberOutput) ToRegionAutoscalingPolicyIamMemberOutput() RegionAutoscalingPolicyIamMemberOutput

func (RegionAutoscalingPolicyIamMemberOutput) ToRegionAutoscalingPolicyIamMemberOutputWithContext added in v0.26.0

func (o RegionAutoscalingPolicyIamMemberOutput) ToRegionAutoscalingPolicyIamMemberOutputWithContext(ctx context.Context) RegionAutoscalingPolicyIamMemberOutput

type RegionAutoscalingPolicyIamMemberState added in v0.26.0

type RegionAutoscalingPolicyIamMemberState struct {
}

func (RegionAutoscalingPolicyIamMemberState) ElementType added in v0.26.0

type RegionAutoscalingPolicyIamPolicy

type RegionAutoscalingPolicyIamPolicy struct {
	pulumi.CustomResourceState

	AutoscalingPolicyId pulumi.StringOutput `pulumi:"autoscalingPolicyId"`
	// Associates a list of members, or principals, with a role. Optionally, may specify a condition that determines how and when the bindings are applied. Each of the bindings must contain at least one principal.The bindings in a Policy can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the bindings grant 50 different roles to user:alice@example.com, and not to any other principal, then you can add another 1,450 principals to the bindings in the Policy.
	Bindings BindingResponseArrayOutput `pulumi:"bindings"`
	// etag is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the etag in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An etag is returned in the response to getIamPolicy, and systems are expected to put that etag in the request to setIamPolicy to ensure that their change will be applied to the same version of the policy.Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.
	Etag     pulumi.StringOutput `pulumi:"etag"`
	Project  pulumi.StringOutput `pulumi:"project"`
	RegionId pulumi.StringOutput `pulumi:"regionId"`
	// Specifies the format of the policy.Valid values are 0, 1, and 3. Requests that specify an invalid value are rejected.Any operation that affects conditional role bindings must specify version 3. This requirement applies to the following operations: Getting a policy that includes a conditional role binding Adding a conditional role binding to a policy Changing a conditional role binding in a policy Removing any role binding, with or without a condition, from a policy that includes conditionsImportant: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).
	Version pulumi.IntOutput `pulumi:"version"`
}

Sets the access control policy on the specified resource. Replaces any existing policy. Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors. Note - this resource's API doesn't support deletion. When deleted, the resource will persist on Google Cloud even though it will be deleted from Pulumi state.

func GetRegionAutoscalingPolicyIamPolicy

func GetRegionAutoscalingPolicyIamPolicy(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *RegionAutoscalingPolicyIamPolicyState, opts ...pulumi.ResourceOption) (*RegionAutoscalingPolicyIamPolicy, error)

GetRegionAutoscalingPolicyIamPolicy gets an existing RegionAutoscalingPolicyIamPolicy resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewRegionAutoscalingPolicyIamPolicy

func NewRegionAutoscalingPolicyIamPolicy(ctx *pulumi.Context,
	name string, args *RegionAutoscalingPolicyIamPolicyArgs, opts ...pulumi.ResourceOption) (*RegionAutoscalingPolicyIamPolicy, error)

NewRegionAutoscalingPolicyIamPolicy registers a new resource with the given unique name, arguments, and options.

func (*RegionAutoscalingPolicyIamPolicy) ElementType

func (*RegionAutoscalingPolicyIamPolicy) ToRegionAutoscalingPolicyIamPolicyOutput

func (i *RegionAutoscalingPolicyIamPolicy) ToRegionAutoscalingPolicyIamPolicyOutput() RegionAutoscalingPolicyIamPolicyOutput

func (*RegionAutoscalingPolicyIamPolicy) ToRegionAutoscalingPolicyIamPolicyOutputWithContext

func (i *RegionAutoscalingPolicyIamPolicy) ToRegionAutoscalingPolicyIamPolicyOutputWithContext(ctx context.Context) RegionAutoscalingPolicyIamPolicyOutput

type RegionAutoscalingPolicyIamPolicyArgs

type RegionAutoscalingPolicyIamPolicyArgs struct {
	AutoscalingPolicyId pulumi.StringInput
	// Associates a list of members, or principals, with a role. Optionally, may specify a condition that determines how and when the bindings are applied. Each of the bindings must contain at least one principal.The bindings in a Policy can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the bindings grant 50 different roles to user:alice@example.com, and not to any other principal, then you can add another 1,450 principals to the bindings in the Policy.
	Bindings BindingArrayInput
	// etag is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the etag in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An etag is returned in the response to getIamPolicy, and systems are expected to put that etag in the request to setIamPolicy to ensure that their change will be applied to the same version of the policy.Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.
	Etag     pulumi.StringPtrInput
	Project  pulumi.StringPtrInput
	RegionId pulumi.StringInput
	// Specifies the format of the policy.Valid values are 0, 1, and 3. Requests that specify an invalid value are rejected.Any operation that affects conditional role bindings must specify version 3. This requirement applies to the following operations: Getting a policy that includes a conditional role binding Adding a conditional role binding to a policy Changing a conditional role binding in a policy Removing any role binding, with or without a condition, from a policy that includes conditionsImportant: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).
	Version pulumi.IntPtrInput
}

The set of arguments for constructing a RegionAutoscalingPolicyIamPolicy resource.
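A sketch of setting the full policy from inside a Pulumi program is shown below; the BindingArgs field names, the import paths, and all identifiers are assumptions for illustration. Keep in mind that this resource replaces any existing policy on the autoscaling policy:

package example

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// setAutoscalingPolicyIam replaces the IAM policy on one autoscaling policy
// with a single binding.
func setAutoscalingPolicyIam(ctx *pulumi.Context) error {
	_, err := dataproc.NewRegionAutoscalingPolicyIamPolicy(ctx, "policy", &dataproc.RegionAutoscalingPolicyIamPolicyArgs{
		AutoscalingPolicyId: pulumi.String("my-policy"),
		RegionId:            pulumi.String("us-central1"),
		Bindings: dataproc.BindingArray{
			dataproc.BindingArgs{
				Role:    pulumi.String("roles/viewer"),
				Members: pulumi.StringArray{pulumi.String("user:alice@example.com")},
			},
		},
	})
	return err
}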

func (RegionAutoscalingPolicyIamPolicyArgs) ElementType

type RegionAutoscalingPolicyIamPolicyInput

type RegionAutoscalingPolicyIamPolicyInput interface {
	pulumi.Input

	ToRegionAutoscalingPolicyIamPolicyOutput() RegionAutoscalingPolicyIamPolicyOutput
	ToRegionAutoscalingPolicyIamPolicyOutputWithContext(ctx context.Context) RegionAutoscalingPolicyIamPolicyOutput
}

type RegionAutoscalingPolicyIamPolicyOutput

type RegionAutoscalingPolicyIamPolicyOutput struct{ *pulumi.OutputState }

func (RegionAutoscalingPolicyIamPolicyOutput) AutoscalingPolicyId added in v0.21.0

func (RegionAutoscalingPolicyIamPolicyOutput) Bindings added in v0.19.0

Associates a list of members, or principals, with a role. Optionally, may specify a condition that determines how and when the bindings are applied. Each of the bindings must contain at least one principal. The bindings in a Policy can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the bindings grant 50 different roles to user:alice@example.com, and not to any other principal, then you can add another 1,450 principals to the bindings in the Policy.

func (RegionAutoscalingPolicyIamPolicyOutput) ElementType

func (RegionAutoscalingPolicyIamPolicyOutput) Etag added in v0.19.0

etag is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the etag in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An etag is returned in the response to getIamPolicy, and systems are expected to put that etag in the request to setIamPolicy to ensure that their change will be applied to the same version of the policy. Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.

func (RegionAutoscalingPolicyIamPolicyOutput) Project added in v0.21.0

func (RegionAutoscalingPolicyIamPolicyOutput) RegionId added in v0.21.0

func (RegionAutoscalingPolicyIamPolicyOutput) ToRegionAutoscalingPolicyIamPolicyOutput

func (o RegionAutoscalingPolicyIamPolicyOutput) ToRegionAutoscalingPolicyIamPolicyOutput() RegionAutoscalingPolicyIamPolicyOutput

func (RegionAutoscalingPolicyIamPolicyOutput) ToRegionAutoscalingPolicyIamPolicyOutputWithContext

func (o RegionAutoscalingPolicyIamPolicyOutput) ToRegionAutoscalingPolicyIamPolicyOutputWithContext(ctx context.Context) RegionAutoscalingPolicyIamPolicyOutput

func (RegionAutoscalingPolicyIamPolicyOutput) Version added in v0.19.0

Specifies the format of the policy. Valid values are 0, 1, and 3. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version 3. This requirement applies to the following operations: getting a policy that includes a conditional role binding; adding a conditional role binding to a policy; changing a conditional role binding in a policy; removing any role binding, with or without a condition, from a policy that includes conditions. Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).

type RegionAutoscalingPolicyIamPolicyState

type RegionAutoscalingPolicyIamPolicyState struct {
}

func (RegionAutoscalingPolicyIamPolicyState) ElementType

type RegionClusterIamBinding added in v0.26.0

type RegionClusterIamBinding struct {
	pulumi.CustomResourceState

	// An IAM Condition for a given binding. See https://cloud.google.com/iam/docs/conditions-overview for additional details.
	Condition iam.ConditionPtrOutput `pulumi:"condition"`
	// The etag of the resource's IAM policy.
	Etag pulumi.StringOutput `pulumi:"etag"`
	// Specifies the principals requesting access for a Google Cloud resource. members can have the following values: allUsers: A special identifier that represents anyone who is on the internet; with or without a Google account. allAuthenticatedUsers: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. user:{emailid}: An email address that represents a specific Google account. For example, alice@example.com . serviceAccount:{emailid}: An email address that represents a Google service account. For example, my-other-app@appspot.gserviceaccount.com. serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]: An identifier for a Kubernetes service account (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, my-project.svc.id.goog[my-namespace/my-kubernetes-sa]. group:{emailid}: An email address that represents a Google group. For example, admins@example.com. domain:{domain}: The G Suite domain (primary) that represents all the users of that domain. For example, google.com or example.com. deleted:user:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a user that has been recently deleted. For example, alice@example.com?uid=123456789012345678901. If the user is recovered, this value reverts to user:{emailid} and the recovered user retains the role in the binding. deleted:serviceAccount:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901. If the service account is undeleted, this value reverts to serviceAccount:{emailid} and the undeleted service account retains the role in the binding. deleted:group:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, admins@example.com?uid=123456789012345678901. If the group is recovered, this value reverts to group:{emailid} and the recovered group retains the role in the binding.
	Members pulumi.StringArrayOutput `pulumi:"members"`
	// The name of the resource to manage IAM policies for.
	Name pulumi.StringOutput `pulumi:"name"`
	// The project in which the resource belongs. If it is not provided, a default will be supplied.
	Project pulumi.StringOutput `pulumi:"project"`
	// Role that is assigned to the list of members, or principals. For example, roles/viewer, roles/editor, or roles/owner.
	Role pulumi.StringOutput `pulumi:"role"`
}

Sets the access control policy on the specified resource. Replaces any existing policy. Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.

func GetRegionClusterIamBinding added in v0.26.0

func GetRegionClusterIamBinding(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *RegionClusterIamBindingState, opts ...pulumi.ResourceOption) (*RegionClusterIamBinding, error)

GetRegionClusterIamBinding gets an existing RegionClusterIamBinding resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).
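For example (a sketch only; the ID format, helper name, and exported output name are assumptions), an existing binding can be read back into the program by its provider-assigned ID:

package example

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// lookupBinding reads an existing RegionClusterIamBinding by ID and exports
// its etag. No extra state properties are needed, so state is nil.
func lookupBinding(ctx *pulumi.Context) error {
	binding, err := dataproc.GetRegionClusterIamBinding(ctx, "existing-binding",
		pulumi.ID("projects/my-project/regions/us-central1/clusters/my-cluster/roles/viewer"), nil)
	if err != nil {
		return err
	}
	ctx.Export("bindingEtag", binding.Etag)
	return nil
}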

func NewRegionClusterIamBinding added in v0.26.0

func NewRegionClusterIamBinding(ctx *pulumi.Context,
	name string, args *RegionClusterIamBindingArgs, opts ...pulumi.ResourceOption) (*RegionClusterIamBinding, error)

NewRegionClusterIamBinding registers a new resource with the given unique name, arguments, and options.

func (*RegionClusterIamBinding) ElementType added in v0.26.0

func (*RegionClusterIamBinding) ElementType() reflect.Type

func (*RegionClusterIamBinding) ToRegionClusterIamBindingOutput added in v0.26.0

func (i *RegionClusterIamBinding) ToRegionClusterIamBindingOutput() RegionClusterIamBindingOutput

func (*RegionClusterIamBinding) ToRegionClusterIamBindingOutputWithContext added in v0.26.0

func (i *RegionClusterIamBinding) ToRegionClusterIamBindingOutputWithContext(ctx context.Context) RegionClusterIamBindingOutput

type RegionClusterIamBindingArgs added in v0.26.0

type RegionClusterIamBindingArgs struct {
	// An IAM Condition for a given binding.
	Condition iam.ConditionPtrInput
	// Identities that will be granted the privilege in role. Each entry can have one of the following values:
	//
	//  * user:{emailid}: An email address that represents a specific Google account. For example, alice@gmail.com or joe@example.com.
	//  * serviceAccount:{emailid}: An email address that represents a service account. For example, my-other-app@appspot.gserviceaccount.com.
	//  * group:{emailid}: An email address that represents a Google group. For example, admins@example.com.
	//  * domain:{domain}: A G Suite domain (primary, instead of alias) name that represents all the users of that domain. For example, google.com or example.com.
	Members pulumi.StringArrayInput
	// The name of the resource to manage IAM policies for.
	Name pulumi.StringInput
	// The role that should be applied. Only one `IamBinding` can be used per role.
	Role pulumi.StringInput
}

The set of arguments for constructing a RegionClusterIamBinding resource.

func (RegionClusterIamBindingArgs) ElementType added in v0.26.0

type RegionClusterIamBindingInput added in v0.26.0

type RegionClusterIamBindingInput interface {
	pulumi.Input

	ToRegionClusterIamBindingOutput() RegionClusterIamBindingOutput
	ToRegionClusterIamBindingOutputWithContext(ctx context.Context) RegionClusterIamBindingOutput
}

type RegionClusterIamBindingOutput added in v0.26.0

type RegionClusterIamBindingOutput struct{ *pulumi.OutputState }

func (RegionClusterIamBindingOutput) Condition added in v0.26.0

An IAM Condition for a given binding. See https://cloud.google.com/iam/docs/conditions-overview for additional details.

func (RegionClusterIamBindingOutput) ElementType added in v0.26.0

func (RegionClusterIamBindingOutput) Etag added in v0.26.0

The etag of the resource's IAM policy.

func (RegionClusterIamBindingOutput) Members added in v0.26.0

Specifies the principals requesting access for a Google Cloud resource. members can have the following values: allUsers: A special identifier that represents anyone who is on the internet; with or without a Google account. allAuthenticatedUsers: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. user:{emailid}: An email address that represents a specific Google account. For example, alice@example.com . serviceAccount:{emailid}: An email address that represents a Google service account. For example, my-other-app@appspot.gserviceaccount.com. serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]: An identifier for a Kubernetes service account (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, my-project.svc.id.goog[my-namespace/my-kubernetes-sa]. group:{emailid}: An email address that represents a Google group. For example, admins@example.com. domain:{domain}: The G Suite domain (primary) that represents all the users of that domain. For example, google.com or example.com. deleted:user:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a user that has been recently deleted. For example, alice@example.com?uid=123456789012345678901. If the user is recovered, this value reverts to user:{emailid} and the recovered user retains the role in the binding. deleted:serviceAccount:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901. If the service account is undeleted, this value reverts to serviceAccount:{emailid} and the undeleted service account retains the role in the binding. deleted:group:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, admins@example.com?uid=123456789012345678901. If the group is recovered, this value reverts to group:{emailid} and the recovered group retains the role in the binding.

func (RegionClusterIamBindingOutput) Name added in v0.26.0

The name of the resource to manage IAM policies for.

func (RegionClusterIamBindingOutput) Project added in v0.26.0

The project in which the resource belongs. If it is not provided, a default will be supplied.

func (RegionClusterIamBindingOutput) Role added in v0.26.0

Role that is assigned to the list of members, or principals. For example, roles/viewer, roles/editor, or roles/owner.

func (RegionClusterIamBindingOutput) ToRegionClusterIamBindingOutput added in v0.26.0

func (o RegionClusterIamBindingOutput) ToRegionClusterIamBindingOutput() RegionClusterIamBindingOutput

func (RegionClusterIamBindingOutput) ToRegionClusterIamBindingOutputWithContext added in v0.26.0

func (o RegionClusterIamBindingOutput) ToRegionClusterIamBindingOutputWithContext(ctx context.Context) RegionClusterIamBindingOutput

type RegionClusterIamBindingState added in v0.26.0

type RegionClusterIamBindingState struct {
}

func (RegionClusterIamBindingState) ElementType added in v0.26.0

type RegionClusterIamMember added in v0.26.0

type RegionClusterIamMember struct {
	pulumi.CustomResourceState

	// An IAM Condition for a given binding. See https://cloud.google.com/iam/docs/conditions-overview for additional details.
	Condition iam.ConditionPtrOutput `pulumi:"condition"`
	// The etag of the resource's IAM policy.
	Etag pulumi.StringOutput `pulumi:"etag"`
	// Specifies the principals requesting access for a Google Cloud resource. members can have the following values: allUsers: A special identifier that represents anyone who is on the internet; with or without a Google account. allAuthenticatedUsers: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. user:{emailid}: An email address that represents a specific Google account. For example, alice@example.com . serviceAccount:{emailid}: An email address that represents a Google service account. For example, my-other-app@appspot.gserviceaccount.com. serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]: An identifier for a Kubernetes service account (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, my-project.svc.id.goog[my-namespace/my-kubernetes-sa]. group:{emailid}: An email address that represents a Google group. For example, admins@example.com. domain:{domain}: The G Suite domain (primary) that represents all the users of that domain. For example, google.com or example.com. deleted:user:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a user that has been recently deleted. For example, alice@example.com?uid=123456789012345678901. If the user is recovered, this value reverts to user:{emailid} and the recovered user retains the role in the binding. deleted:serviceAccount:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901. If the service account is undeleted, this value reverts to serviceAccount:{emailid} and the undeleted service account retains the role in the binding. deleted:group:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, admins@example.com?uid=123456789012345678901. If the group is recovered, this value reverts to group:{emailid} and the recovered group retains the role in the binding.
	Member pulumi.StringOutput `pulumi:"member"`
	// The name of the resource to manage IAM policies for.
	Name pulumi.StringOutput `pulumi:"name"`
	// The project in which the resource belongs. If it is not provided, a default will be supplied.
	Project pulumi.StringOutput `pulumi:"project"`
	// Role that is assigned to the list of members, or principals. For example, roles/viewer, roles/editor, or roles/owner.
	Role pulumi.StringOutput `pulumi:"role"`
}

Sets the access control policy on the specified resource. Replaces any existing policy. Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.

func GetRegionClusterIamMember added in v0.26.0

func GetRegionClusterIamMember(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *RegionClusterIamMemberState, opts ...pulumi.ResourceOption) (*RegionClusterIamMember, error)

GetRegionClusterIamMember gets an existing RegionClusterIamMember resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewRegionClusterIamMember added in v0.26.0

func NewRegionClusterIamMember(ctx *pulumi.Context,
	name string, args *RegionClusterIamMemberArgs, opts ...pulumi.ResourceOption) (*RegionClusterIamMember, error)

NewRegionClusterIamMember registers a new resource with the given unique name, arguments, and options.

func (*RegionClusterIamMember) ElementType added in v0.26.0

func (*RegionClusterIamMember) ElementType() reflect.Type

func (*RegionClusterIamMember) ToRegionClusterIamMemberOutput added in v0.26.0

func (i *RegionClusterIamMember) ToRegionClusterIamMemberOutput() RegionClusterIamMemberOutput

func (*RegionClusterIamMember) ToRegionClusterIamMemberOutputWithContext added in v0.26.0

func (i *RegionClusterIamMember) ToRegionClusterIamMemberOutputWithContext(ctx context.Context) RegionClusterIamMemberOutput

type RegionClusterIamMemberArgs added in v0.26.0

type RegionClusterIamMemberArgs struct {
	// An IAM Condition for a given binding.
	Condition iam.ConditionPtrInput
	// Identity that will be granted the privilege in role. The entry can have one of the following values:
	//
	//  * user:{emailid}: An email address that represents a specific Google account. For example, alice@gmail.com or joe@example.com.
	//  * serviceAccount:{emailid}: An email address that represents a service account. For example, my-other-app@appspot.gserviceaccount.com.
	//  * group:{emailid}: An email address that represents a Google group. For example, admins@example.com.
	//  * domain:{domain}: A G Suite domain (primary, instead of alias) name that represents all the users of that domain. For example, google.com or example.com.
	Member pulumi.StringInput
	// The name of the resource to manage IAM policies for.
	Name pulumi.StringInput
	// The role that should be applied.
	Role pulumi.StringInput
}

The set of arguments for constructing a RegionClusterIamMember resource.
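A sketch of granting a single principal a role on a regional cluster (the import paths, identifiers, service account, and role are illustrative assumptions):

package example

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// grantClusterEditor adds one service account as an editor on a cluster.
// Unlike an IamBinding, an IamMember manages a single member within a role.
func grantClusterEditor(ctx *pulumi.Context) error {
	_, err := dataproc.NewRegionClusterIamMember(ctx, "editor", &dataproc.RegionClusterIamMemberArgs{
		Name:   pulumi.String("projects/my-project/regions/us-central1/clusters/my-cluster"),
		Role:   pulumi.String("roles/dataproc.editor"),
		Member: pulumi.String("serviceAccount:app@my-project.iam.gserviceaccount.com"),
	})
	return err
}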

func (RegionClusterIamMemberArgs) ElementType added in v0.26.0

func (RegionClusterIamMemberArgs) ElementType() reflect.Type

type RegionClusterIamMemberInput added in v0.26.0

type RegionClusterIamMemberInput interface {
	pulumi.Input

	ToRegionClusterIamMemberOutput() RegionClusterIamMemberOutput
	ToRegionClusterIamMemberOutputWithContext(ctx context.Context) RegionClusterIamMemberOutput
}

type RegionClusterIamMemberOutput added in v0.26.0

type RegionClusterIamMemberOutput struct{ *pulumi.OutputState }

func (RegionClusterIamMemberOutput) Condition added in v0.26.0

An IAM Condition for a given binding. See https://cloud.google.com/iam/docs/conditions-overview for additional details.

func (RegionClusterIamMemberOutput) ElementType added in v0.26.0

func (RegionClusterIamMemberOutput) Etag added in v0.26.0

The etag of the resource's IAM policy.

func (RegionClusterIamMemberOutput) Member added in v0.26.0

Specifies the principals requesting access for a Google Cloud resource. members can have the following values: allUsers: A special identifier that represents anyone who is on the internet; with or without a Google account. allAuthenticatedUsers: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. user:{emailid}: An email address that represents a specific Google account. For example, alice@example.com . serviceAccount:{emailid}: An email address that represents a Google service account. For example, my-other-app@appspot.gserviceaccount.com. serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]: An identifier for a Kubernetes service account (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, my-project.svc.id.goog[my-namespace/my-kubernetes-sa]. group:{emailid}: An email address that represents a Google group. For example, admins@example.com. domain:{domain}: The G Suite domain (primary) that represents all the users of that domain. For example, google.com or example.com. deleted:user:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a user that has been recently deleted. For example, alice@example.com?uid=123456789012345678901. If the user is recovered, this value reverts to user:{emailid} and the recovered user retains the role in the binding. deleted:serviceAccount:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901. If the service account is undeleted, this value reverts to serviceAccount:{emailid} and the undeleted service account retains the role in the binding. deleted:group:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, admins@example.com?uid=123456789012345678901. If the group is recovered, this value reverts to group:{emailid} and the recovered group retains the role in the binding.

func (RegionClusterIamMemberOutput) Name added in v0.26.0

The name of the resource to manage IAM policies for.

func (RegionClusterIamMemberOutput) Project added in v0.26.0

The project in which the resource belongs. If it is not provided, a default will be supplied.

func (RegionClusterIamMemberOutput) Role added in v0.26.0

Role that is assigned to the list of members, or principals. For example, roles/viewer, roles/editor, or roles/owner.

func (RegionClusterIamMemberOutput) ToRegionClusterIamMemberOutput added in v0.26.0

func (o RegionClusterIamMemberOutput) ToRegionClusterIamMemberOutput() RegionClusterIamMemberOutput

func (RegionClusterIamMemberOutput) ToRegionClusterIamMemberOutputWithContext added in v0.26.0

func (o RegionClusterIamMemberOutput) ToRegionClusterIamMemberOutputWithContext(ctx context.Context) RegionClusterIamMemberOutput

type RegionClusterIamMemberState added in v0.26.0

type RegionClusterIamMemberState struct {
}

func (RegionClusterIamMemberState) ElementType added in v0.26.0

type RegionClusterIamPolicy

type RegionClusterIamPolicy struct {
	pulumi.CustomResourceState

	// Associates a list of members, or principals, with a role. Optionally, may specify a condition that determines how and when the bindings are applied. Each of the bindings must contain at least one principal.The bindings in a Policy can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the bindings grant 50 different roles to user:alice@example.com, and not to any other principal, then you can add another 1,450 principals to the bindings in the Policy.
	Bindings  BindingResponseArrayOutput `pulumi:"bindings"`
	ClusterId pulumi.StringOutput        `pulumi:"clusterId"`
	// etag is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the etag in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An etag is returned in the response to getIamPolicy, and systems are expected to put that etag in the request to setIamPolicy to ensure that their change will be applied to the same version of the policy.Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.
	Etag     pulumi.StringOutput `pulumi:"etag"`
	Project  pulumi.StringOutput `pulumi:"project"`
	RegionId pulumi.StringOutput `pulumi:"regionId"`
	// Specifies the format of the policy.Valid values are 0, 1, and 3. Requests that specify an invalid value are rejected.Any operation that affects conditional role bindings must specify version 3. This requirement applies to the following operations: Getting a policy that includes a conditional role binding Adding a conditional role binding to a policy Changing a conditional role binding in a policy Removing any role binding, with or without a condition, from a policy that includes conditionsImportant: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).
	Version pulumi.IntOutput `pulumi:"version"`
}

Sets the access control policy on the specified resource. Replaces any existing policy. Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors. Note: this resource's API doesn't support deletion. When deleted, the resource will persist on Google Cloud even though it will be deleted from Pulumi state.

func GetRegionClusterIamPolicy

func GetRegionClusterIamPolicy(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *RegionClusterIamPolicyState, opts ...pulumi.ResourceOption) (*RegionClusterIamPolicy, error)

GetRegionClusterIamPolicy gets an existing RegionClusterIamPolicy resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).
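For orientation, a minimal sketch of adopting an existing policy by ID inside a Pulumi program. The import path below and the ID string format are assumptions for illustration, not confirmed values:

package main

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1" // assumed import path
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Look up the state of an existing RegionClusterIamPolicy by its ID.
		// The ID below is a hypothetical placeholder; use the ID reported by your provider.
		policy, err := dataproc.GetRegionClusterIamPolicy(ctx, "existing-cluster-policy",
			pulumi.ID("projects/my-project/regions/us-central1/clusters/my-cluster"), nil)
		if err != nil {
			return err
		}
		ctx.Export("etag", policy.Etag)
		return nil
	})
}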

func NewRegionClusterIamPolicy

func NewRegionClusterIamPolicy(ctx *pulumi.Context,
	name string, args *RegionClusterIamPolicyArgs, opts ...pulumi.ResourceOption) (*RegionClusterIamPolicy, error)

NewRegionClusterIamPolicy registers a new resource with the given unique name, arguments, and options.
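A minimal sketch of constructing the resource, assuming the same import path as above. The project, region, cluster, and principal values are placeholders, and the Binding input shape (Role plus Members) mirrors the Bindings field documented below:

package main

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1" // assumed import path
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Replace the cluster's IAM policy with a single viewer binding.
		_, err := dataproc.NewRegionClusterIamPolicy(ctx, "cluster-policy", &dataproc.RegionClusterIamPolicyArgs{
			ClusterId: pulumi.String("my-cluster"),  // placeholder cluster ID
			RegionId:  pulumi.String("us-central1"), // region the cluster lives in
			Project:   pulumi.String("my-project"),  // optional; a default is supplied if omitted
			Bindings: dataproc.BindingArray{
				dataproc.BindingArgs{
					Role:    pulumi.String("roles/viewer"),
					Members: pulumi.StringArray{pulumi.String("user:alice@example.com")},
				},
			},
		})
		return err
	})
}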

func (*RegionClusterIamPolicy) ElementType

func (*RegionClusterIamPolicy) ElementType() reflect.Type

func (*RegionClusterIamPolicy) ToRegionClusterIamPolicyOutput

func (i *RegionClusterIamPolicy) ToRegionClusterIamPolicyOutput() RegionClusterIamPolicyOutput

func (*RegionClusterIamPolicy) ToRegionClusterIamPolicyOutputWithContext

func (i *RegionClusterIamPolicy) ToRegionClusterIamPolicyOutputWithContext(ctx context.Context) RegionClusterIamPolicyOutput

type RegionClusterIamPolicyArgs

type RegionClusterIamPolicyArgs struct {
	// Associates a list of members, or principals, with a role. Optionally, may specify a condition that determines how and when the bindings are applied. Each of the bindings must contain at least one principal.The bindings in a Policy can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the bindings grant 50 different roles to user:alice@example.com, and not to any other principal, then you can add another 1,450 principals to the bindings in the Policy.
	Bindings  BindingArrayInput
	ClusterId pulumi.StringInput
	// etag is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the etag in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An etag is returned in the response to getIamPolicy, and systems are expected to put that etag in the request to setIamPolicy to ensure that their change will be applied to the same version of the policy.Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.
	Etag     pulumi.StringPtrInput
	Project  pulumi.StringPtrInput
	RegionId pulumi.StringInput
	// Specifies the format of the policy.Valid values are 0, 1, and 3. Requests that specify an invalid value are rejected.Any operation that affects conditional role bindings must specify version 3. This requirement applies to the following operations: Getting a policy that includes a conditional role binding Adding a conditional role binding to a policy Changing a conditional role binding in a policy Removing any role binding, with or without a condition, from a policy that includes conditionsImportant: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).
	Version pulumi.IntPtrInput
}

The set of arguments for constructing a RegionClusterIamPolicy resource.

func (RegionClusterIamPolicyArgs) ElementType

func (RegionClusterIamPolicyArgs) ElementType() reflect.Type

type RegionClusterIamPolicyInput

type RegionClusterIamPolicyInput interface {
	pulumi.Input

	ToRegionClusterIamPolicyOutput() RegionClusterIamPolicyOutput
	ToRegionClusterIamPolicyOutputWithContext(ctx context.Context) RegionClusterIamPolicyOutput
}

type RegionClusterIamPolicyOutput

type RegionClusterIamPolicyOutput struct{ *pulumi.OutputState }

func (RegionClusterIamPolicyOutput) Bindings added in v0.19.0

Associates a list of members, or principals, with a role. Optionally, may specify a condition that determines how and when the bindings are applied. Each of the bindings must contain at least one principal. The bindings in a Policy can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the bindings grant 50 different roles to user:alice@example.com, and not to any other principal, then you can add another 1,450 principals to the bindings in the Policy.

func (RegionClusterIamPolicyOutput) ClusterId added in v0.21.0

func (RegionClusterIamPolicyOutput) ElementType

func (RegionClusterIamPolicyOutput) Etag added in v0.19.0

etag is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the etag in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An etag is returned in the response to getIamPolicy, and systems are expected to put that etag in the request to setIamPolicy to ensure that their change will be applied to the same version of the policy. Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.

func (RegionClusterIamPolicyOutput) Project added in v0.21.0

func (RegionClusterIamPolicyOutput) RegionId added in v0.21.0

func (RegionClusterIamPolicyOutput) ToRegionClusterIamPolicyOutput

func (o RegionClusterIamPolicyOutput) ToRegionClusterIamPolicyOutput() RegionClusterIamPolicyOutput

func (RegionClusterIamPolicyOutput) ToRegionClusterIamPolicyOutputWithContext

func (o RegionClusterIamPolicyOutput) ToRegionClusterIamPolicyOutputWithContext(ctx context.Context) RegionClusterIamPolicyOutput

func (RegionClusterIamPolicyOutput) Version added in v0.19.0

Specifies the format of the policy. Valid values are 0, 1, and 3. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version 3. This requirement applies to the following operations: getting a policy that includes a conditional role binding; adding a conditional role binding to a policy; changing a conditional role binding in a policy; removing any role binding, with or without a condition, from a policy that includes conditions. Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).

type RegionClusterIamPolicyState

type RegionClusterIamPolicyState struct {
}

func (RegionClusterIamPolicyState) ElementType

type RegionJobIamBinding added in v0.26.0

type RegionJobIamBinding struct {
	pulumi.CustomResourceState

	// An IAM Condition for a given binding. See https://cloud.google.com/iam/docs/conditions-overview for additional details.
	Condition iam.ConditionPtrOutput `pulumi:"condition"`
	// The etag of the resource's IAM policy.
	Etag pulumi.StringOutput `pulumi:"etag"`
	// Specifies the principals requesting access for a Google Cloud resource. members can have the following values: allUsers: A special identifier that represents anyone who is on the internet; with or without a Google account. allAuthenticatedUsers: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. user:{emailid}: An email address that represents a specific Google account. For example, alice@example.com . serviceAccount:{emailid}: An email address that represents a Google service account. For example, my-other-app@appspot.gserviceaccount.com. serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]: An identifier for a Kubernetes service account (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, my-project.svc.id.goog[my-namespace/my-kubernetes-sa]. group:{emailid}: An email address that represents a Google group. For example, admins@example.com. domain:{domain}: The G Suite domain (primary) that represents all the users of that domain. For example, google.com or example.com. deleted:user:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a user that has been recently deleted. For example, alice@example.com?uid=123456789012345678901. If the user is recovered, this value reverts to user:{emailid} and the recovered user retains the role in the binding. deleted:serviceAccount:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901. If the service account is undeleted, this value reverts to serviceAccount:{emailid} and the undeleted service account retains the role in the binding. deleted:group:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, admins@example.com?uid=123456789012345678901. If the group is recovered, this value reverts to group:{emailid} and the recovered group retains the role in the binding.
	Members pulumi.StringArrayOutput `pulumi:"members"`
	// The name of the resource to manage IAM policies for.
	Name pulumi.StringOutput `pulumi:"name"`
	// The project in which the resource belongs. If it is not provided, a default will be supplied.
	Project pulumi.StringOutput `pulumi:"project"`
	// Role that is assigned to the list of members, or principals. For example, roles/viewer, roles/editor, or roles/owner.
	Role pulumi.StringOutput `pulumi:"role"`
}

Sets the access control policy on the specified resource. Replaces any existing policy. Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.

func GetRegionJobIamBinding added in v0.26.0

func GetRegionJobIamBinding(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *RegionJobIamBindingState, opts ...pulumi.ResourceOption) (*RegionJobIamBinding, error)

GetRegionJobIamBinding gets an existing RegionJobIamBinding resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewRegionJobIamBinding added in v0.26.0

func NewRegionJobIamBinding(ctx *pulumi.Context,
	name string, args *RegionJobIamBindingArgs, opts ...pulumi.ResourceOption) (*RegionJobIamBinding, error)

NewRegionJobIamBinding registers a new resource with the given unique name, arguments, and options.
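A minimal sketch of granting a role on a regional Dataproc job through a binding. The import path, the resource name format, and the principals shown are illustrative assumptions; the argument names follow RegionJobIamBindingArgs below:

package main

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1" // assumed import path
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Grant roles/viewer on a job to two principals; only one IamBinding may be used per role.
		_, err := dataproc.NewRegionJobIamBinding(ctx, "job-viewers", &dataproc.RegionJobIamBindingArgs{
			Name: pulumi.String("projects/my-project/regions/us-central1/jobs/my-job"), // illustrative resource name
			Role: pulumi.String("roles/viewer"),
			Members: pulumi.StringArray{
				pulumi.String("group:admins@example.com"),
				pulumi.String("user:alice@example.com"),
			},
		})
		return err
	})
}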

func (*RegionJobIamBinding) ElementType added in v0.26.0

func (*RegionJobIamBinding) ElementType() reflect.Type

func (*RegionJobIamBinding) ToRegionJobIamBindingOutput added in v0.26.0

func (i *RegionJobIamBinding) ToRegionJobIamBindingOutput() RegionJobIamBindingOutput

func (*RegionJobIamBinding) ToRegionJobIamBindingOutputWithContext added in v0.26.0

func (i *RegionJobIamBinding) ToRegionJobIamBindingOutputWithContext(ctx context.Context) RegionJobIamBindingOutput

type RegionJobIamBindingArgs added in v0.26.0

type RegionJobIamBindingArgs struct {
	// An IAM Condition for a given binding.
	Condition iam.ConditionPtrInput
	// Identities that will be granted the privilege in role. Each entry can have one of the following values:
	//
	//  * user:{emailid}: An email address that represents a specific Google account. For example, alice@gmail.com or joe@example.com.
	//  * serviceAccount:{emailid}: An email address that represents a service account. For example, my-other-app@appspot.gserviceaccount.com.
	//  * group:{emailid}: An email address that represents a Google group. For example, admins@example.com.
	//  * domain:{domain}: A G Suite domain (primary, instead of alias) name that represents all the users of that domain. For example, google.com or example.com.
	Members pulumi.StringArrayInput
	// The name of the resource to manage IAM policies for.
	Name pulumi.StringInput
	// The role that should be applied. Only one `IamBinding` can be used per role.
	Role pulumi.StringInput
}

The set of arguments for constructing a RegionJobIamBinding resource.

func (RegionJobIamBindingArgs) ElementType added in v0.26.0

func (RegionJobIamBindingArgs) ElementType() reflect.Type

type RegionJobIamBindingInput added in v0.26.0

type RegionJobIamBindingInput interface {
	pulumi.Input

	ToRegionJobIamBindingOutput() RegionJobIamBindingOutput
	ToRegionJobIamBindingOutputWithContext(ctx context.Context) RegionJobIamBindingOutput
}

type RegionJobIamBindingOutput added in v0.26.0

type RegionJobIamBindingOutput struct{ *pulumi.OutputState }

func (RegionJobIamBindingOutput) Condition added in v0.26.0

An IAM Condition for a given binding. See https://cloud.google.com/iam/docs/conditions-overview for additional details.

func (RegionJobIamBindingOutput) ElementType added in v0.26.0

func (RegionJobIamBindingOutput) ElementType() reflect.Type

func (RegionJobIamBindingOutput) Etag added in v0.26.0

The etag of the resource's IAM policy.

func (RegionJobIamBindingOutput) Members added in v0.26.0

Specifies the principals requesting access for a Google Cloud resource. members can have the following values:

allUsers: A special identifier that represents anyone who is on the internet, with or without a Google account.
allAuthenticatedUsers: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation.
user:{emailid}: An email address that represents a specific Google account. For example, alice@example.com.
serviceAccount:{emailid}: An email address that represents a Google service account. For example, my-other-app@appspot.gserviceaccount.com.
serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]: An identifier for a Kubernetes service account (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, my-project.svc.id.goog[my-namespace/my-kubernetes-sa].
group:{emailid}: An email address that represents a Google group. For example, admins@example.com.
domain:{domain}: The G Suite domain (primary) that represents all the users of that domain. For example, google.com or example.com.
deleted:user:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a user that has been recently deleted. For example, alice@example.com?uid=123456789012345678901. If the user is recovered, this value reverts to user:{emailid} and the recovered user retains the role in the binding.
deleted:serviceAccount:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901. If the service account is undeleted, this value reverts to serviceAccount:{emailid} and the undeleted service account retains the role in the binding.
deleted:group:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, admins@example.com?uid=123456789012345678901. If the group is recovered, this value reverts to group:{emailid} and the recovered group retains the role in the binding.

func (RegionJobIamBindingOutput) Name added in v0.26.0

The name of the resource to manage IAM policies for.

func (RegionJobIamBindingOutput) Project added in v0.26.0

The project in which the resource belongs. If it is not provided, a default will be supplied.

func (RegionJobIamBindingOutput) Role added in v0.26.0

Role that is assigned to the list of members, or principals. For example, roles/viewer, roles/editor, or roles/owner.

func (RegionJobIamBindingOutput) ToRegionJobIamBindingOutput added in v0.26.0

func (o RegionJobIamBindingOutput) ToRegionJobIamBindingOutput() RegionJobIamBindingOutput

func (RegionJobIamBindingOutput) ToRegionJobIamBindingOutputWithContext added in v0.26.0

func (o RegionJobIamBindingOutput) ToRegionJobIamBindingOutputWithContext(ctx context.Context) RegionJobIamBindingOutput

type RegionJobIamBindingState added in v0.26.0

type RegionJobIamBindingState struct {
}

func (RegionJobIamBindingState) ElementType added in v0.26.0

func (RegionJobIamBindingState) ElementType() reflect.Type

type RegionJobIamMember added in v0.26.0

type RegionJobIamMember struct {
	pulumi.CustomResourceState

	// An IAM Condition for a given binding. See https://cloud.google.com/iam/docs/conditions-overview for additional details.
	Condition iam.ConditionPtrOutput `pulumi:"condition"`
	// The etag of the resource's IAM policy.
	Etag pulumi.StringOutput `pulumi:"etag"`
	// Specifies the principals requesting access for a Google Cloud resource. members can have the following values: allUsers: A special identifier that represents anyone who is on the internet; with or without a Google account. allAuthenticatedUsers: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. user:{emailid}: An email address that represents a specific Google account. For example, alice@example.com . serviceAccount:{emailid}: An email address that represents a Google service account. For example, my-other-app@appspot.gserviceaccount.com. serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]: An identifier for a Kubernetes service account (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, my-project.svc.id.goog[my-namespace/my-kubernetes-sa]. group:{emailid}: An email address that represents a Google group. For example, admins@example.com. domain:{domain}: The G Suite domain (primary) that represents all the users of that domain. For example, google.com or example.com. deleted:user:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a user that has been recently deleted. For example, alice@example.com?uid=123456789012345678901. If the user is recovered, this value reverts to user:{emailid} and the recovered user retains the role in the binding. deleted:serviceAccount:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901. If the service account is undeleted, this value reverts to serviceAccount:{emailid} and the undeleted service account retains the role in the binding. deleted:group:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, admins@example.com?uid=123456789012345678901. If the group is recovered, this value reverts to group:{emailid} and the recovered group retains the role in the binding.
	Member pulumi.StringOutput `pulumi:"member"`
	// The name of the resource to manage IAM policies for.
	Name pulumi.StringOutput `pulumi:"name"`
	// The project in which the resource belongs. If it is not provided, a default will be supplied.
	Project pulumi.StringOutput `pulumi:"project"`
	// Role that is assigned to the list of members, or principals. For example, roles/viewer, roles/editor, or roles/owner.
	Role pulumi.StringOutput `pulumi:"role"`
}

Sets the access control policy on the specified resource. Replaces any existing policy. Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.

func GetRegionJobIamMember added in v0.26.0

func GetRegionJobIamMember(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *RegionJobIamMemberState, opts ...pulumi.ResourceOption) (*RegionJobIamMember, error)

GetRegionJobIamMember gets an existing RegionJobIamMember resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewRegionJobIamMember added in v0.26.0

func NewRegionJobIamMember(ctx *pulumi.Context,
	name string, args *RegionJobIamMemberArgs, opts ...pulumi.ResourceOption) (*RegionJobIamMember, error)

NewRegionJobIamMember registers a new resource with the given unique name, arguments, and options.
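The single-principal variant follows the same pattern; a sketch under the same import-path and naming assumptions as the earlier examples:

package main

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1" // assumed import path
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Grant a single service account roles/editor on the job.
		_, err := dataproc.NewRegionJobIamMember(ctx, "job-editor", &dataproc.RegionJobIamMemberArgs{
			Name:   pulumi.String("projects/my-project/regions/us-central1/jobs/my-job"), // illustrative resource name
			Role:   pulumi.String("roles/editor"),
			Member: pulumi.String("serviceAccount:my-other-app@appspot.gserviceaccount.com"),
		})
		return err
	})
}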

func (*RegionJobIamMember) ElementType added in v0.26.0

func (*RegionJobIamMember) ElementType() reflect.Type

func (*RegionJobIamMember) ToRegionJobIamMemberOutput added in v0.26.0

func (i *RegionJobIamMember) ToRegionJobIamMemberOutput() RegionJobIamMemberOutput

func (*RegionJobIamMember) ToRegionJobIamMemberOutputWithContext added in v0.26.0

func (i *RegionJobIamMember) ToRegionJobIamMemberOutputWithContext(ctx context.Context) RegionJobIamMemberOutput

type RegionJobIamMemberArgs added in v0.26.0

type RegionJobIamMemberArgs struct {
	// An IAM Condition for a given binding.
	Condition iam.ConditionPtrInput
	// Identity that will be granted the privilege in role. The entry can have one of the following values:
	//
	//  * user:{emailid}: An email address that represents a specific Google account. For example, alice@gmail.com or joe@example.com.
	//  * serviceAccount:{emailid}: An email address that represents a service account. For example, my-other-app@appspot.gserviceaccount.com.
	//  * group:{emailid}: An email address that represents a Google group. For example, admins@example.com.
	//  * domain:{domain}: A G Suite domain (primary, instead of alias) name that represents all the users of that domain. For example, google.com or example.com.
	Member pulumi.StringInput
	// The name of the resource to manage IAM policies for.
	Name pulumi.StringInput
	// The role that should be applied.
	Role pulumi.StringInput
}

The set of arguments for constructing a RegionJobIamMember resource.

func (RegionJobIamMemberArgs) ElementType added in v0.26.0

func (RegionJobIamMemberArgs) ElementType() reflect.Type

type RegionJobIamMemberInput added in v0.26.0

type RegionJobIamMemberInput interface {
	pulumi.Input

	ToRegionJobIamMemberOutput() RegionJobIamMemberOutput
	ToRegionJobIamMemberOutputWithContext(ctx context.Context) RegionJobIamMemberOutput
}

type RegionJobIamMemberOutput added in v0.26.0

type RegionJobIamMemberOutput struct{ *pulumi.OutputState }

func (RegionJobIamMemberOutput) Condition added in v0.26.0

An IAM Condition for a given binding. See https://cloud.google.com/iam/docs/conditions-overview for additional details.

func (RegionJobIamMemberOutput) ElementType added in v0.26.0

func (RegionJobIamMemberOutput) ElementType() reflect.Type

func (RegionJobIamMemberOutput) Etag added in v0.26.0

The etag of the resource's IAM policy.

func (RegionJobIamMemberOutput) Member added in v0.26.0

Specifies the principals requesting access for a Google Cloud resource. members can have the following values:

allUsers: A special identifier that represents anyone who is on the internet, with or without a Google account.
allAuthenticatedUsers: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation.
user:{emailid}: An email address that represents a specific Google account. For example, alice@example.com.
serviceAccount:{emailid}: An email address that represents a Google service account. For example, my-other-app@appspot.gserviceaccount.com.
serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]: An identifier for a Kubernetes service account (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, my-project.svc.id.goog[my-namespace/my-kubernetes-sa].
group:{emailid}: An email address that represents a Google group. For example, admins@example.com.
domain:{domain}: The G Suite domain (primary) that represents all the users of that domain. For example, google.com or example.com.
deleted:user:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a user that has been recently deleted. For example, alice@example.com?uid=123456789012345678901. If the user is recovered, this value reverts to user:{emailid} and the recovered user retains the role in the binding.
deleted:serviceAccount:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901. If the service account is undeleted, this value reverts to serviceAccount:{emailid} and the undeleted service account retains the role in the binding.
deleted:group:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, admins@example.com?uid=123456789012345678901. If the group is recovered, this value reverts to group:{emailid} and the recovered group retains the role in the binding.

func (RegionJobIamMemberOutput) Name added in v0.26.0

The name of the resource to manage IAM policies for.

func (RegionJobIamMemberOutput) Project added in v0.26.0

The project in which the resource belongs. If it is not provided, a default will be supplied.

func (RegionJobIamMemberOutput) Role added in v0.26.0

Role that is assigned to the list of members, or principals. For example, roles/viewer, roles/editor, or roles/owner.

func (RegionJobIamMemberOutput) ToRegionJobIamMemberOutput added in v0.26.0

func (o RegionJobIamMemberOutput) ToRegionJobIamMemberOutput() RegionJobIamMemberOutput

func (RegionJobIamMemberOutput) ToRegionJobIamMemberOutputWithContext added in v0.26.0

func (o RegionJobIamMemberOutput) ToRegionJobIamMemberOutputWithContext(ctx context.Context) RegionJobIamMemberOutput

type RegionJobIamMemberState added in v0.26.0

type RegionJobIamMemberState struct {
}

func (RegionJobIamMemberState) ElementType added in v0.26.0

func (RegionJobIamMemberState) ElementType() reflect.Type

type RegionJobIamPolicy

type RegionJobIamPolicy struct {
	pulumi.CustomResourceState

	// Associates a list of members, or principals, with a role. Optionally, may specify a condition that determines how and when the bindings are applied. Each of the bindings must contain at least one principal.The bindings in a Policy can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the bindings grant 50 different roles to user:alice@example.com, and not to any other principal, then you can add another 1,450 principals to the bindings in the Policy.
	Bindings BindingResponseArrayOutput `pulumi:"bindings"`
	// etag is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the etag in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An etag is returned in the response to getIamPolicy, and systems are expected to put that etag in the request to setIamPolicy to ensure that their change will be applied to the same version of the policy.Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.
	Etag     pulumi.StringOutput `pulumi:"etag"`
	JobId    pulumi.StringOutput `pulumi:"jobId"`
	Project  pulumi.StringOutput `pulumi:"project"`
	RegionId pulumi.StringOutput `pulumi:"regionId"`
	// Specifies the format of the policy.Valid values are 0, 1, and 3. Requests that specify an invalid value are rejected.Any operation that affects conditional role bindings must specify version 3. This requirement applies to the following operations: Getting a policy that includes a conditional role binding Adding a conditional role binding to a policy Changing a conditional role binding in a policy Removing any role binding, with or without a condition, from a policy that includes conditionsImportant: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).
	Version pulumi.IntOutput `pulumi:"version"`
}

Sets the access control policy on the specified resource. Replaces any existing policy. Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors. Note: this resource's API doesn't support deletion. When deleted, the resource will persist on Google Cloud even though it will be deleted from Pulumi state.

func GetRegionJobIamPolicy

func GetRegionJobIamPolicy(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *RegionJobIamPolicyState, opts ...pulumi.ResourceOption) (*RegionJobIamPolicy, error)

GetRegionJobIamPolicy gets an existing RegionJobIamPolicy resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewRegionJobIamPolicy

func NewRegionJobIamPolicy(ctx *pulumi.Context,
	name string, args *RegionJobIamPolicyArgs, opts ...pulumi.ResourceOption) (*RegionJobIamPolicy, error)

NewRegionJobIamPolicy registers a new resource with the given unique name, arguments, and options.
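A sketch of setting the whole job policy at once, with the same assumed import path and placeholder values as the earlier examples; the argument names are taken from RegionJobIamPolicyArgs below:

package main

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1" // assumed import path
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Replace the job's IAM policy with a single binding. Destroying this
		// resource does not delete the policy on Google Cloud.
		_, err := dataproc.NewRegionJobIamPolicy(ctx, "job-policy", &dataproc.RegionJobIamPolicyArgs{
			JobId:    pulumi.String("my-job"), // placeholder job ID
			RegionId: pulumi.String("us-central1"),
			Project:  pulumi.String("my-project"), // optional; a default is supplied if omitted
			Bindings: dataproc.BindingArray{
				dataproc.BindingArgs{
					Role:    pulumi.String("roles/viewer"),
					Members: pulumi.StringArray{pulumi.String("user:alice@example.com")},
				},
			},
		})
		return err
	})
}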

func (*RegionJobIamPolicy) ElementType

func (*RegionJobIamPolicy) ElementType() reflect.Type

func (*RegionJobIamPolicy) ToRegionJobIamPolicyOutput

func (i *RegionJobIamPolicy) ToRegionJobIamPolicyOutput() RegionJobIamPolicyOutput

func (*RegionJobIamPolicy) ToRegionJobIamPolicyOutputWithContext

func (i *RegionJobIamPolicy) ToRegionJobIamPolicyOutputWithContext(ctx context.Context) RegionJobIamPolicyOutput

type RegionJobIamPolicyArgs

type RegionJobIamPolicyArgs struct {
	// Associates a list of members, or principals, with a role. Optionally, may specify a condition that determines how and when the bindings are applied. Each of the bindings must contain at least one principal.The bindings in a Policy can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the bindings grant 50 different roles to user:alice@example.com, and not to any other principal, then you can add another 1,450 principals to the bindings in the Policy.
	Bindings BindingArrayInput
	// etag is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the etag in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An etag is returned in the response to getIamPolicy, and systems are expected to put that etag in the request to setIamPolicy to ensure that their change will be applied to the same version of the policy.Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.
	Etag     pulumi.StringPtrInput
	JobId    pulumi.StringInput
	Project  pulumi.StringPtrInput
	RegionId pulumi.StringInput
	// Specifies the format of the policy.Valid values are 0, 1, and 3. Requests that specify an invalid value are rejected.Any operation that affects conditional role bindings must specify version 3. This requirement applies to the following operations: Getting a policy that includes a conditional role binding Adding a conditional role binding to a policy Changing a conditional role binding in a policy Removing any role binding, with or without a condition, from a policy that includes conditionsImportant: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).
	Version pulumi.IntPtrInput
}

The set of arguments for constructing a RegionJobIamPolicy resource.

func (RegionJobIamPolicyArgs) ElementType

func (RegionJobIamPolicyArgs) ElementType() reflect.Type

type RegionJobIamPolicyInput

type RegionJobIamPolicyInput interface {
	pulumi.Input

	ToRegionJobIamPolicyOutput() RegionJobIamPolicyOutput
	ToRegionJobIamPolicyOutputWithContext(ctx context.Context) RegionJobIamPolicyOutput
}

type RegionJobIamPolicyOutput

type RegionJobIamPolicyOutput struct{ *pulumi.OutputState }

func (RegionJobIamPolicyOutput) Bindings added in v0.19.0

Associates a list of members, or principals, with a role. Optionally, may specify a condition that determines how and when the bindings are applied. Each of the bindings must contain at least one principal. The bindings in a Policy can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the bindings grant 50 different roles to user:alice@example.com, and not to any other principal, then you can add another 1,450 principals to the bindings in the Policy.

func (RegionJobIamPolicyOutput) ElementType

func (RegionJobIamPolicyOutput) ElementType() reflect.Type

func (RegionJobIamPolicyOutput) Etag added in v0.19.0

etag is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the etag in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An etag is returned in the response to getIamPolicy, and systems are expected to put that etag in the request to setIamPolicy to ensure that their change will be applied to the same version of the policy. Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.

func (RegionJobIamPolicyOutput) JobId added in v0.21.0

func (RegionJobIamPolicyOutput) Project added in v0.21.0

func (RegionJobIamPolicyOutput) RegionId added in v0.21.0

func (RegionJobIamPolicyOutput) ToRegionJobIamPolicyOutput

func (o RegionJobIamPolicyOutput) ToRegionJobIamPolicyOutput() RegionJobIamPolicyOutput

func (RegionJobIamPolicyOutput) ToRegionJobIamPolicyOutputWithContext

func (o RegionJobIamPolicyOutput) ToRegionJobIamPolicyOutputWithContext(ctx context.Context) RegionJobIamPolicyOutput

func (RegionJobIamPolicyOutput) Version added in v0.19.0

Specifies the format of the policy. Valid values are 0, 1, and 3. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version 3. This requirement applies to the following operations: getting a policy that includes a conditional role binding; adding a conditional role binding to a policy; changing a conditional role binding in a policy; removing any role binding, with or without a condition, from a policy that includes conditions. Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).

type RegionJobIamPolicyState

type RegionJobIamPolicyState struct {
}

func (RegionJobIamPolicyState) ElementType

func (RegionJobIamPolicyState) ElementType() reflect.Type

type RegionOperationIamBinding added in v0.26.0

type RegionOperationIamBinding struct {
	pulumi.CustomResourceState

	// An IAM Condition for a given binding. See https://cloud.google.com/iam/docs/conditions-overview for additional details.
	Condition iam.ConditionPtrOutput `pulumi:"condition"`
	// The etag of the resource's IAM policy.
	Etag pulumi.StringOutput `pulumi:"etag"`
	// Specifies the principals requesting access for a Google Cloud resource. members can have the following values: allUsers: A special identifier that represents anyone who is on the internet; with or without a Google account. allAuthenticatedUsers: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. user:{emailid}: An email address that represents a specific Google account. For example, alice@example.com . serviceAccount:{emailid}: An email address that represents a Google service account. For example, my-other-app@appspot.gserviceaccount.com. serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]: An identifier for a Kubernetes service account (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, my-project.svc.id.goog[my-namespace/my-kubernetes-sa]. group:{emailid}: An email address that represents a Google group. For example, admins@example.com. domain:{domain}: The G Suite domain (primary) that represents all the users of that domain. For example, google.com or example.com. deleted:user:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a user that has been recently deleted. For example, alice@example.com?uid=123456789012345678901. If the user is recovered, this value reverts to user:{emailid} and the recovered user retains the role in the binding. deleted:serviceAccount:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901. If the service account is undeleted, this value reverts to serviceAccount:{emailid} and the undeleted service account retains the role in the binding. deleted:group:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, admins@example.com?uid=123456789012345678901. If the group is recovered, this value reverts to group:{emailid} and the recovered group retains the role in the binding.
	Members pulumi.StringArrayOutput `pulumi:"members"`
	// The name of the resource to manage IAM policies for.
	Name pulumi.StringOutput `pulumi:"name"`
	// The project in which the resource belongs. If it is not provided, a default will be supplied.
	Project pulumi.StringOutput `pulumi:"project"`
	// Role that is assigned to the list of members, or principals. For example, roles/viewer, roles/editor, or roles/owner.
	Role pulumi.StringOutput `pulumi:"role"`
}

Sets the access control policy on the specified resource. Replaces any existing policy. Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.

func GetRegionOperationIamBinding added in v0.26.0

func GetRegionOperationIamBinding(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *RegionOperationIamBindingState, opts ...pulumi.ResourceOption) (*RegionOperationIamBinding, error)

GetRegionOperationIamBinding gets an existing RegionOperationIamBinding resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewRegionOperationIamBinding added in v0.26.0

func NewRegionOperationIamBinding(ctx *pulumi.Context,
	name string, args *RegionOperationIamBindingArgs, opts ...pulumi.ResourceOption) (*RegionOperationIamBinding, error)

NewRegionOperationIamBinding registers a new resource with the given unique name, arguments, and options.
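Operation-level bindings use the same shape as the job-level ones; a sketch with an assumed import path and illustrative names:

package main

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1" // assumed import path
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Grant roles/viewer on a regional operation to a group.
		_, err := dataproc.NewRegionOperationIamBinding(ctx, "operation-viewers", &dataproc.RegionOperationIamBindingArgs{
			Name:    pulumi.String("projects/my-project/regions/us-central1/operations/my-operation"), // illustrative resource name
			Role:    pulumi.String("roles/viewer"),
			Members: pulumi.StringArray{pulumi.String("group:admins@example.com")},
		})
		return err
	})
}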

func (*RegionOperationIamBinding) ElementType added in v0.26.0

func (*RegionOperationIamBinding) ElementType() reflect.Type

func (*RegionOperationIamBinding) ToRegionOperationIamBindingOutput added in v0.26.0

func (i *RegionOperationIamBinding) ToRegionOperationIamBindingOutput() RegionOperationIamBindingOutput

func (*RegionOperationIamBinding) ToRegionOperationIamBindingOutputWithContext added in v0.26.0

func (i *RegionOperationIamBinding) ToRegionOperationIamBindingOutputWithContext(ctx context.Context) RegionOperationIamBindingOutput

type RegionOperationIamBindingArgs added in v0.26.0

type RegionOperationIamBindingArgs struct {
	// An IAM Condition for a given binding.
	Condition iam.ConditionPtrInput
	// Identities that will be granted the privilege in role. Each entry can have one of the following values:
	//
	//  * user:{emailid}: An email address that represents a specific Google account. For example, alice@gmail.com or joe@example.com.
	//  * serviceAccount:{emailid}: An email address that represents a service account. For example, my-other-app@appspot.gserviceaccount.com.
	//  * group:{emailid}: An email address that represents a Google group. For example, admins@example.com.
	//  * domain:{domain}: A G Suite domain (primary, instead of alias) name that represents all the users of that domain. For example, google.com or example.com.
	Members pulumi.StringArrayInput
	// The name of the resource to manage IAM policies for.
	Name pulumi.StringInput
	// The role that should be applied. Only one `IamBinding` can be used per role.
	Role pulumi.StringInput
}

The set of arguments for constructing a RegionOperationIamBinding resource.

func (RegionOperationIamBindingArgs) ElementType added in v0.26.0

type RegionOperationIamBindingInput added in v0.26.0

type RegionOperationIamBindingInput interface {
	pulumi.Input

	ToRegionOperationIamBindingOutput() RegionOperationIamBindingOutput
	ToRegionOperationIamBindingOutputWithContext(ctx context.Context) RegionOperationIamBindingOutput
}

type RegionOperationIamBindingOutput added in v0.26.0

type RegionOperationIamBindingOutput struct{ *pulumi.OutputState }

func (RegionOperationIamBindingOutput) Condition added in v0.26.0

An IAM Condition for a given binding. See https://cloud.google.com/iam/docs/conditions-overview for additional details.

func (RegionOperationIamBindingOutput) ElementType added in v0.26.0

func (RegionOperationIamBindingOutput) Etag added in v0.26.0

The etag of the resource's IAM policy.

func (RegionOperationIamBindingOutput) Members added in v0.26.0

Specifies the principals requesting access for a Google Cloud resource. members can have the following values:

allUsers: A special identifier that represents anyone who is on the internet, with or without a Google account.
allAuthenticatedUsers: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation.
user:{emailid}: An email address that represents a specific Google account. For example, alice@example.com.
serviceAccount:{emailid}: An email address that represents a Google service account. For example, my-other-app@appspot.gserviceaccount.com.
serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]: An identifier for a Kubernetes service account (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, my-project.svc.id.goog[my-namespace/my-kubernetes-sa].
group:{emailid}: An email address that represents a Google group. For example, admins@example.com.
domain:{domain}: The G Suite domain (primary) that represents all the users of that domain. For example, google.com or example.com.
deleted:user:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a user that has been recently deleted. For example, alice@example.com?uid=123456789012345678901. If the user is recovered, this value reverts to user:{emailid} and the recovered user retains the role in the binding.
deleted:serviceAccount:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901. If the service account is undeleted, this value reverts to serviceAccount:{emailid} and the undeleted service account retains the role in the binding.
deleted:group:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, admins@example.com?uid=123456789012345678901. If the group is recovered, this value reverts to group:{emailid} and the recovered group retains the role in the binding.

func (RegionOperationIamBindingOutput) Name added in v0.26.0

The name of the resource to manage IAM policies for.

func (RegionOperationIamBindingOutput) Project added in v0.26.0

The project in which the resource belongs. If it is not provided, a default will be supplied.

func (RegionOperationIamBindingOutput) Role added in v0.26.0

Role that is assigned to the list of members, or principals. For example, roles/viewer, roles/editor, or roles/owner.

func (RegionOperationIamBindingOutput) ToRegionOperationIamBindingOutput added in v0.26.0

func (o RegionOperationIamBindingOutput) ToRegionOperationIamBindingOutput() RegionOperationIamBindingOutput

func (RegionOperationIamBindingOutput) ToRegionOperationIamBindingOutputWithContext added in v0.26.0

func (o RegionOperationIamBindingOutput) ToRegionOperationIamBindingOutputWithContext(ctx context.Context) RegionOperationIamBindingOutput

type RegionOperationIamBindingState added in v0.26.0

type RegionOperationIamBindingState struct {
}

func (RegionOperationIamBindingState) ElementType added in v0.26.0

type RegionOperationIamMember added in v0.26.0

type RegionOperationIamMember struct {
	pulumi.CustomResourceState

	// An IAM Condition for a given binding. See https://cloud.google.com/iam/docs/conditions-overview for additional details.
	Condition iam.ConditionPtrOutput `pulumi:"condition"`
	// The etag of the resource's IAM policy.
	Etag pulumi.StringOutput `pulumi:"etag"`
	// Specifies the principals requesting access for a Google Cloud resource. members can have the following values: allUsers: A special identifier that represents anyone who is on the internet; with or without a Google account. allAuthenticatedUsers: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. user:{emailid}: An email address that represents a specific Google account. For example, alice@example.com . serviceAccount:{emailid}: An email address that represents a Google service account. For example, my-other-app@appspot.gserviceaccount.com. serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]: An identifier for a Kubernetes service account (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, my-project.svc.id.goog[my-namespace/my-kubernetes-sa]. group:{emailid}: An email address that represents a Google group. For example, admins@example.com. domain:{domain}: The G Suite domain (primary) that represents all the users of that domain. For example, google.com or example.com. deleted:user:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a user that has been recently deleted. For example, alice@example.com?uid=123456789012345678901. If the user is recovered, this value reverts to user:{emailid} and the recovered user retains the role in the binding. deleted:serviceAccount:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901. If the service account is undeleted, this value reverts to serviceAccount:{emailid} and the undeleted service account retains the role in the binding. deleted:group:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, admins@example.com?uid=123456789012345678901. If the group is recovered, this value reverts to group:{emailid} and the recovered group retains the role in the binding.
	Member pulumi.StringOutput `pulumi:"member"`
	// The name of the resource to manage IAM policies for.
	Name pulumi.StringOutput `pulumi:"name"`
	// The project in which the resource belongs. If it is not provided, a default will be supplied.
	Project pulumi.StringOutput `pulumi:"project"`
	// Role that is assigned to the list of members, or principals. For example, roles/viewer, roles/editor, or roles/owner.
	Role pulumi.StringOutput `pulumi:"role"`
}

Sets the access control policy on the specified resource. Replaces any existing policy. Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.

func GetRegionOperationIamMember added in v0.26.0

func GetRegionOperationIamMember(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *RegionOperationIamMemberState, opts ...pulumi.ResourceOption) (*RegionOperationIamMember, error)

GetRegionOperationIamMember gets an existing RegionOperationIamMember resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewRegionOperationIamMember added in v0.26.0

func NewRegionOperationIamMember(ctx *pulumi.Context,
	name string, args *RegionOperationIamMemberArgs, opts ...pulumi.ResourceOption) (*RegionOperationIamMember, error)

NewRegionOperationIamMember registers a new resource with the given unique name, arguments, and options.
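And the single-principal counterpart, again with an assumed import path and placeholder names:

package main

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1" // assumed import path
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Grant a single user roles/viewer on the operation.
		_, err := dataproc.NewRegionOperationIamMember(ctx, "operation-viewer", &dataproc.RegionOperationIamMemberArgs{
			Name:   pulumi.String("projects/my-project/regions/us-central1/operations/my-operation"), // illustrative resource name
			Role:   pulumi.String("roles/viewer"),
			Member: pulumi.String("user:alice@example.com"),
		})
		return err
	})
}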

func (*RegionOperationIamMember) ElementType added in v0.26.0

func (*RegionOperationIamMember) ElementType() reflect.Type

func (*RegionOperationIamMember) ToRegionOperationIamMemberOutput added in v0.26.0

func (i *RegionOperationIamMember) ToRegionOperationIamMemberOutput() RegionOperationIamMemberOutput

func (*RegionOperationIamMember) ToRegionOperationIamMemberOutputWithContext added in v0.26.0

func (i *RegionOperationIamMember) ToRegionOperationIamMemberOutputWithContext(ctx context.Context) RegionOperationIamMemberOutput

type RegionOperationIamMemberArgs added in v0.26.0

type RegionOperationIamMemberArgs struct {
	// An IAM Condition for a given binding.
	Condition iam.ConditionPtrInput
	// Identity that will be granted the privilege in role. The entry can have one of the following values:
	//
	//  * user:{emailid}: An email address that represents a specific Google account. For example, alice@gmail.com or joe@example.com.
	//  * serviceAccount:{emailid}: An email address that represents a service account. For example, my-other-app@appspot.gserviceaccount.com.
	//  * group:{emailid}: An email address that represents a Google group. For example, admins@example.com.
	//  * domain:{domain}: A G Suite domain (primary, instead of alias) name that represents all the users of that domain. For example, google.com or example.com.
	Member pulumi.StringInput
	// The name of the resource to manage IAM policies for.
	Name pulumi.StringInput
	// The role that should be applied.
	Role pulumi.StringInput
}

The set of arguments for constructing a RegionOperationIamMember resource.

func (RegionOperationIamMemberArgs) ElementType added in v0.26.0

type RegionOperationIamMemberInput added in v0.26.0

type RegionOperationIamMemberInput interface {
	pulumi.Input

	ToRegionOperationIamMemberOutput() RegionOperationIamMemberOutput
	ToRegionOperationIamMemberOutputWithContext(ctx context.Context) RegionOperationIamMemberOutput
}

type RegionOperationIamMemberOutput added in v0.26.0

type RegionOperationIamMemberOutput struct{ *pulumi.OutputState }

func (RegionOperationIamMemberOutput) Condition added in v0.26.0

An IAM Condition for a given binding. See https://cloud.google.com/iam/docs/conditions-overview for additional details.

func (RegionOperationIamMemberOutput) ElementType added in v0.26.0

func (RegionOperationIamMemberOutput) Etag added in v0.26.0

The etag of the resource's IAM policy.

func (RegionOperationIamMemberOutput) Member added in v0.26.0

Specifies the principals requesting access for a Google Cloud resource. members can have the following values:

allUsers: A special identifier that represents anyone who is on the internet, with or without a Google account.
allAuthenticatedUsers: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation.
user:{emailid}: An email address that represents a specific Google account. For example, alice@example.com.
serviceAccount:{emailid}: An email address that represents a Google service account. For example, my-other-app@appspot.gserviceaccount.com.
serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]: An identifier for a Kubernetes service account (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, my-project.svc.id.goog[my-namespace/my-kubernetes-sa].
group:{emailid}: An email address that represents a Google group. For example, admins@example.com.
domain:{domain}: The G Suite domain (primary) that represents all the users of that domain. For example, google.com or example.com.
deleted:user:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a user that has been recently deleted. For example, alice@example.com?uid=123456789012345678901. If the user is recovered, this value reverts to user:{emailid} and the recovered user retains the role in the binding.
deleted:serviceAccount:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901. If the service account is undeleted, this value reverts to serviceAccount:{emailid} and the undeleted service account retains the role in the binding.
deleted:group:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, admins@example.com?uid=123456789012345678901. If the group is recovered, this value reverts to group:{emailid} and the recovered group retains the role in the binding.

func (RegionOperationIamMemberOutput) Name added in v0.26.0

The name of the resource to manage IAM policies for.

func (RegionOperationIamMemberOutput) Project added in v0.26.0

The project in which the resource belongs. If it is not provided, a default will be supplied.

func (RegionOperationIamMemberOutput) Role added in v0.26.0

Role that is assigned to the list of members, or principals. For example, roles/viewer, roles/editor, or roles/owner.

func (RegionOperationIamMemberOutput) ToRegionOperationIamMemberOutput added in v0.26.0

func (o RegionOperationIamMemberOutput) ToRegionOperationIamMemberOutput() RegionOperationIamMemberOutput

func (RegionOperationIamMemberOutput) ToRegionOperationIamMemberOutputWithContext added in v0.26.0

func (o RegionOperationIamMemberOutput) ToRegionOperationIamMemberOutputWithContext(ctx context.Context) RegionOperationIamMemberOutput

type RegionOperationIamMemberState added in v0.26.0

type RegionOperationIamMemberState struct {
}

func (RegionOperationIamMemberState) ElementType added in v0.26.0

type RegionOperationIamPolicy

type RegionOperationIamPolicy struct {
	pulumi.CustomResourceState

	// Associates a list of members, or principals, with a role. Optionally, may specify a condition that determines how and when the bindings are applied. Each of the bindings must contain at least one principal.The bindings in a Policy can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the bindings grant 50 different roles to user:alice@example.com, and not to any other principal, then you can add another 1,450 principals to the bindings in the Policy.
	Bindings BindingResponseArrayOutput `pulumi:"bindings"`
	// etag is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the etag in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An etag is returned in the response to getIamPolicy, and systems are expected to put that etag in the request to setIamPolicy to ensure that their change will be applied to the same version of the policy.Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.
	Etag        pulumi.StringOutput `pulumi:"etag"`
	OperationId pulumi.StringOutput `pulumi:"operationId"`
	Project     pulumi.StringOutput `pulumi:"project"`
	RegionId    pulumi.StringOutput `pulumi:"regionId"`
	// Specifies the format of the policy.Valid values are 0, 1, and 3. Requests that specify an invalid value are rejected.Any operation that affects conditional role bindings must specify version 3. This requirement applies to the following operations: Getting a policy that includes a conditional role binding Adding a conditional role binding to a policy Changing a conditional role binding in a policy Removing any role binding, with or without a condition, from a policy that includes conditionsImportant: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).
	Version pulumi.IntOutput `pulumi:"version"`
}

Sets the access control policy on the specified resource. Replaces any existing policy. Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors. Note - this resource's API doesn't support deletion. When deleted, the resource will persist on Google Cloud even though it will be deleted from Pulumi state.

func GetRegionOperationIamPolicy

func GetRegionOperationIamPolicy(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *RegionOperationIamPolicyState, opts ...pulumi.ResourceOption) (*RegionOperationIamPolicy, error)

GetRegionOperationIamPolicy gets an existing RegionOperationIamPolicy resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).
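
As an illustrative sketch only (inside a Pulumi program's Run callback, with the same imports as the complete example below), a lookup might look like this; the ID string is a placeholder and not a documented format:

	existing, err := dataproc.GetRegionOperationIamPolicy(ctx, "existing-operation-policy",
		pulumi.ID("projects/my-project/regions/us-central1/operations/my-operation-id"), nil)
	if err != nil {
		return err
	}
	ctx.Export("operationPolicyEtag", existing.Etag)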

func NewRegionOperationIamPolicy

func NewRegionOperationIamPolicy(ctx *pulumi.Context,
	name string, args *RegionOperationIamPolicyArgs, opts ...pulumi.ResourceOption) (*RegionOperationIamPolicy, error)

NewRegionOperationIamPolicy registers a new resource with the given unique name, arguments, and options.
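
A minimal, self-contained sketch of registering this resource from a Pulumi Go program follows. The import path is assumed to be the module's conventional one, the IDs are placeholders, and Binding, BindingArgs, and BindingArray are the helper types behind the BindingArrayInput shown above (documented elsewhere in this package):

package main

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Replace any existing bindings on the operation with a single viewer binding.
		// OperationId and RegionId are placeholders.
		_, err := dataproc.NewRegionOperationIamPolicy(ctx, "operation-policy", &dataproc.RegionOperationIamPolicyArgs{
			OperationId: pulumi.String("my-operation-id"),
			RegionId:    pulumi.String("us-central1"),
			Bindings: dataproc.BindingArray{
				&dataproc.BindingArgs{
					Role:    pulumi.String("roles/viewer"),
					Members: pulumi.StringArray{pulumi.String("user:alice@example.com")},
				},
			},
		})
		return err
	})
}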

func (*RegionOperationIamPolicy) ElementType

func (*RegionOperationIamPolicy) ElementType() reflect.Type

func (*RegionOperationIamPolicy) ToRegionOperationIamPolicyOutput

func (i *RegionOperationIamPolicy) ToRegionOperationIamPolicyOutput() RegionOperationIamPolicyOutput

func (*RegionOperationIamPolicy) ToRegionOperationIamPolicyOutputWithContext

func (i *RegionOperationIamPolicy) ToRegionOperationIamPolicyOutputWithContext(ctx context.Context) RegionOperationIamPolicyOutput

type RegionOperationIamPolicyArgs

type RegionOperationIamPolicyArgs struct {
	// Associates a list of members, or principals, with a role. Optionally, may specify a condition that determines how and when the bindings are applied. Each of the bindings must contain at least one principal.The bindings in a Policy can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the bindings grant 50 different roles to user:alice@example.com, and not to any other principal, then you can add another 1,450 principals to the bindings in the Policy.
	Bindings BindingArrayInput
	// etag is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the etag in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An etag is returned in the response to getIamPolicy, and systems are expected to put that etag in the request to setIamPolicy to ensure that their change will be applied to the same version of the policy.Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.
	Etag        pulumi.StringPtrInput
	OperationId pulumi.StringInput
	Project     pulumi.StringPtrInput
	RegionId    pulumi.StringInput
	// Specifies the format of the policy.Valid values are 0, 1, and 3. Requests that specify an invalid value are rejected.Any operation that affects conditional role bindings must specify version 3. This requirement applies to the following operations: Getting a policy that includes a conditional role binding Adding a conditional role binding to a policy Changing a conditional role binding in a policy Removing any role binding, with or without a condition, from a policy that includes conditionsImportant: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).
	Version pulumi.IntPtrInput
}

The set of arguments for constructing a RegionOperationIamPolicy resource.

func (RegionOperationIamPolicyArgs) ElementType

type RegionOperationIamPolicyInput

type RegionOperationIamPolicyInput interface {
	pulumi.Input

	ToRegionOperationIamPolicyOutput() RegionOperationIamPolicyOutput
	ToRegionOperationIamPolicyOutputWithContext(ctx context.Context) RegionOperationIamPolicyOutput
}

type RegionOperationIamPolicyOutput

type RegionOperationIamPolicyOutput struct{ *pulumi.OutputState }

func (RegionOperationIamPolicyOutput) Bindings added in v0.19.0

Associates a list of members, or principals, with a role. Optionally, may specify a condition that determines how and when the bindings are applied. Each of the bindings must contain at least one principal. The bindings in a Policy can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the bindings grant 50 different roles to user:alice@example.com, and not to any other principal, then you can add another 1,450 principals to the bindings in the Policy.

func (RegionOperationIamPolicyOutput) ElementType

func (RegionOperationIamPolicyOutput) Etag added in v0.19.0

etag is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the etag in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An etag is returned in the response to getIamPolicy, and systems are expected to put that etag in the request to setIamPolicy to ensure that their change will be applied to the same version of the policy. Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.

func (RegionOperationIamPolicyOutput) OperationId added in v0.21.0

func (RegionOperationIamPolicyOutput) Project added in v0.21.0

func (RegionOperationIamPolicyOutput) RegionId added in v0.21.0

func (RegionOperationIamPolicyOutput) ToRegionOperationIamPolicyOutput

func (o RegionOperationIamPolicyOutput) ToRegionOperationIamPolicyOutput() RegionOperationIamPolicyOutput

func (RegionOperationIamPolicyOutput) ToRegionOperationIamPolicyOutputWithContext

func (o RegionOperationIamPolicyOutput) ToRegionOperationIamPolicyOutputWithContext(ctx context.Context) RegionOperationIamPolicyOutput

func (RegionOperationIamPolicyOutput) Version added in v0.19.0

Specifies the format of the policy. Valid values are 0, 1, and 3. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version 3. This requirement applies to the following operations: getting a policy that includes a conditional role binding; adding a conditional role binding to a policy; changing a conditional role binding in a policy; and removing any role binding, with or without a condition, from a policy that includes conditions. Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).

type RegionOperationIamPolicyState

type RegionOperationIamPolicyState struct {
}

func (RegionOperationIamPolicyState) ElementType

type RegionWorkflowTemplateIamBinding added in v0.26.0

type RegionWorkflowTemplateIamBinding struct {
	pulumi.CustomResourceState

	// An IAM Condition for a given binding. See https://cloud.google.com/iam/docs/conditions-overview for additional details.
	Condition iam.ConditionPtrOutput `pulumi:"condition"`
	// The etag of the resource's IAM policy.
	Etag pulumi.StringOutput `pulumi:"etag"`
	// Specifies the principals requesting access for a Google Cloud resource. members can have the following values: allUsers: A special identifier that represents anyone who is on the internet; with or without a Google account. allAuthenticatedUsers: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. user:{emailid}: An email address that represents a specific Google account. For example, alice@example.com . serviceAccount:{emailid}: An email address that represents a Google service account. For example, my-other-app@appspot.gserviceaccount.com. serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]: An identifier for a Kubernetes service account (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, my-project.svc.id.goog[my-namespace/my-kubernetes-sa]. group:{emailid}: An email address that represents a Google group. For example, admins@example.com. domain:{domain}: The G Suite domain (primary) that represents all the users of that domain. For example, google.com or example.com. deleted:user:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a user that has been recently deleted. For example, alice@example.com?uid=123456789012345678901. If the user is recovered, this value reverts to user:{emailid} and the recovered user retains the role in the binding. deleted:serviceAccount:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901. If the service account is undeleted, this value reverts to serviceAccount:{emailid} and the undeleted service account retains the role in the binding. deleted:group:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, admins@example.com?uid=123456789012345678901. If the group is recovered, this value reverts to group:{emailid} and the recovered group retains the role in the binding.
	Members pulumi.StringArrayOutput `pulumi:"members"`
	// The name of the resource to manage IAM policies for.
	Name pulumi.StringOutput `pulumi:"name"`
	// The project in which the resource belongs. If it is not provided, a default will be supplied.
	Project pulumi.StringOutput `pulumi:"project"`
	// Role that is assigned to the list of members, or principals. For example, roles/viewer, roles/editor, or roles/owner.
	Role pulumi.StringOutput `pulumi:"role"`
}

Sets the access control policy on the specified resource. Replaces any existing policy. Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.

func GetRegionWorkflowTemplateIamBinding added in v0.26.0

func GetRegionWorkflowTemplateIamBinding(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *RegionWorkflowTemplateIamBindingState, opts ...pulumi.ResourceOption) (*RegionWorkflowTemplateIamBinding, error)

GetRegionWorkflowTemplateIamBinding gets an existing RegionWorkflowTemplateIamBinding resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewRegionWorkflowTemplateIamBinding added in v0.26.0

func NewRegionWorkflowTemplateIamBinding(ctx *pulumi.Context,
	name string, args *RegionWorkflowTemplateIamBindingArgs, opts ...pulumi.ResourceOption) (*RegionWorkflowTemplateIamBinding, error)

NewRegionWorkflowTemplateIamBinding registers a new resource with the given unique name, arguments, and options.
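
A hedged fragment (same imports and Run callback as the complete sketch earlier) granting a role to several principals on a workflow template might look like this; the template name is a placeholder:

	_, err := dataproc.NewRegionWorkflowTemplateIamBinding(ctx, "template-viewers", &dataproc.RegionWorkflowTemplateIamBindingArgs{
		// Placeholder fully qualified workflow template name.
		Name: pulumi.String("projects/my-project/regions/us-central1/workflowTemplates/my-template"),
		Role: pulumi.String("roles/viewer"),
		Members: pulumi.StringArray{
			pulumi.String("group:admins@example.com"),
			pulumi.String("user:alice@example.com"),
		},
	})
	if err != nil {
		return err
	}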

func (*RegionWorkflowTemplateIamBinding) ElementType added in v0.26.0

func (*RegionWorkflowTemplateIamBinding) ToRegionWorkflowTemplateIamBindingOutput added in v0.26.0

func (i *RegionWorkflowTemplateIamBinding) ToRegionWorkflowTemplateIamBindingOutput() RegionWorkflowTemplateIamBindingOutput

func (*RegionWorkflowTemplateIamBinding) ToRegionWorkflowTemplateIamBindingOutputWithContext added in v0.26.0

func (i *RegionWorkflowTemplateIamBinding) ToRegionWorkflowTemplateIamBindingOutputWithContext(ctx context.Context) RegionWorkflowTemplateIamBindingOutput

type RegionWorkflowTemplateIamBindingArgs added in v0.26.0

type RegionWorkflowTemplateIamBindingArgs struct {
	// An IAM Condition for a given binding.
	Condition iam.ConditionPtrInput
	// Identities that will be granted the privilege in role. Each entry can have one of the following values:
	//
	//  * user:{emailid}: An email address that represents a specific Google account. For example, alice@gmail.com or joe@example.com.
	//  * serviceAccount:{emailid}: An email address that represents a service account. For example, my-other-app@appspot.gserviceaccount.com.
	//  * group:{emailid}: An email address that represents a Google group. For example, admins@example.com.
	//  * domain:{domain}: A G Suite domain (primary, instead of alias) name that represents all the users of that domain. For example, google.com or example.com.
	Members pulumi.StringArrayInput
	// The name of the resource to manage IAM policies for.
	Name pulumi.StringInput
	// The role that should be applied. Only one `IamBinding` can be used per role.
	Role pulumi.StringInput
}

The set of arguments for constructing a RegionWorkflowTemplateIamBinding resource.

func (RegionWorkflowTemplateIamBindingArgs) ElementType added in v0.26.0

type RegionWorkflowTemplateIamBindingInput added in v0.26.0

type RegionWorkflowTemplateIamBindingInput interface {
	pulumi.Input

	ToRegionWorkflowTemplateIamBindingOutput() RegionWorkflowTemplateIamBindingOutput
	ToRegionWorkflowTemplateIamBindingOutputWithContext(ctx context.Context) RegionWorkflowTemplateIamBindingOutput
}

type RegionWorkflowTemplateIamBindingOutput added in v0.26.0

type RegionWorkflowTemplateIamBindingOutput struct{ *pulumi.OutputState }

func (RegionWorkflowTemplateIamBindingOutput) Condition added in v0.26.0

An IAM Condition for a given binding. See https://cloud.google.com/iam/docs/conditions-overview for additional details.

func (RegionWorkflowTemplateIamBindingOutput) ElementType added in v0.26.0

func (RegionWorkflowTemplateIamBindingOutput) Etag added in v0.26.0

The etag of the resource's IAM policy.

func (RegionWorkflowTemplateIamBindingOutput) Members added in v0.26.0

Specifies the principals requesting access for a Google Cloud resource. members can have the following values:

* allUsers: A special identifier that represents anyone who is on the internet; with or without a Google account.
* allAuthenticatedUsers: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation.
* user:{emailid}: An email address that represents a specific Google account. For example, alice@example.com.
* serviceAccount:{emailid}: An email address that represents a Google service account. For example, my-other-app@appspot.gserviceaccount.com.
* serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]: An identifier for a Kubernetes service account (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, my-project.svc.id.goog[my-namespace/my-kubernetes-sa].
* group:{emailid}: An email address that represents a Google group. For example, admins@example.com.
* domain:{domain}: The G Suite domain (primary) that represents all the users of that domain. For example, google.com or example.com.
* deleted:user:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a user that has been recently deleted. For example, alice@example.com?uid=123456789012345678901. If the user is recovered, this value reverts to user:{emailid} and the recovered user retains the role in the binding.
* deleted:serviceAccount:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901. If the service account is undeleted, this value reverts to serviceAccount:{emailid} and the undeleted service account retains the role in the binding.
* deleted:group:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, admins@example.com?uid=123456789012345678901. If the group is recovered, this value reverts to group:{emailid} and the recovered group retains the role in the binding.

func (RegionWorkflowTemplateIamBindingOutput) Name added in v0.26.0

The name of the resource to manage IAM policies for.

func (RegionWorkflowTemplateIamBindingOutput) Project added in v0.26.0

The project in which the resource belongs. If it is not provided, a default will be supplied.

func (RegionWorkflowTemplateIamBindingOutput) Role added in v0.26.0

Role that is assigned to the list of members, or principals. For example, roles/viewer, roles/editor, or roles/owner.

func (RegionWorkflowTemplateIamBindingOutput) ToRegionWorkflowTemplateIamBindingOutput added in v0.26.0

func (o RegionWorkflowTemplateIamBindingOutput) ToRegionWorkflowTemplateIamBindingOutput() RegionWorkflowTemplateIamBindingOutput

func (RegionWorkflowTemplateIamBindingOutput) ToRegionWorkflowTemplateIamBindingOutputWithContext added in v0.26.0

func (o RegionWorkflowTemplateIamBindingOutput) ToRegionWorkflowTemplateIamBindingOutputWithContext(ctx context.Context) RegionWorkflowTemplateIamBindingOutput

type RegionWorkflowTemplateIamBindingState added in v0.26.0

type RegionWorkflowTemplateIamBindingState struct {
}

func (RegionWorkflowTemplateIamBindingState) ElementType added in v0.26.0

type RegionWorkflowTemplateIamMember added in v0.26.0

type RegionWorkflowTemplateIamMember struct {
	pulumi.CustomResourceState

	// An IAM Condition for a given binding. See https://cloud.google.com/iam/docs/conditions-overview for additional details.
	Condition iam.ConditionPtrOutput `pulumi:"condition"`
	// The etag of the resource's IAM policy.
	Etag pulumi.StringOutput `pulumi:"etag"`
	// Specifies the principals requesting access for a Google Cloud resource. members can have the following values: allUsers: A special identifier that represents anyone who is on the internet; with or without a Google account. allAuthenticatedUsers: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. user:{emailid}: An email address that represents a specific Google account. For example, alice@example.com . serviceAccount:{emailid}: An email address that represents a Google service account. For example, my-other-app@appspot.gserviceaccount.com. serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]: An identifier for a Kubernetes service account (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, my-project.svc.id.goog[my-namespace/my-kubernetes-sa]. group:{emailid}: An email address that represents a Google group. For example, admins@example.com. domain:{domain}: The G Suite domain (primary) that represents all the users of that domain. For example, google.com or example.com. deleted:user:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a user that has been recently deleted. For example, alice@example.com?uid=123456789012345678901. If the user is recovered, this value reverts to user:{emailid} and the recovered user retains the role in the binding. deleted:serviceAccount:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901. If the service account is undeleted, this value reverts to serviceAccount:{emailid} and the undeleted service account retains the role in the binding. deleted:group:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, admins@example.com?uid=123456789012345678901. If the group is recovered, this value reverts to group:{emailid} and the recovered group retains the role in the binding.
	Member pulumi.StringOutput `pulumi:"member"`
	// The name of the resource to manage IAM policies for.
	Name pulumi.StringOutput `pulumi:"name"`
	// The project in which the resource belongs. If it is not provided, a default will be supplied.
	Project pulumi.StringOutput `pulumi:"project"`
	// Role that is assigned to the list of members, or principals. For example, roles/viewer, roles/editor, or roles/owner.
	Role pulumi.StringOutput `pulumi:"role"`
}

Sets the access control policy on the specified resource. Replaces any existing policy. Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.

func GetRegionWorkflowTemplateIamMember added in v0.26.0

func GetRegionWorkflowTemplateIamMember(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *RegionWorkflowTemplateIamMemberState, opts ...pulumi.ResourceOption) (*RegionWorkflowTemplateIamMember, error)

GetRegionWorkflowTemplateIamMember gets an existing RegionWorkflowTemplateIamMember resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewRegionWorkflowTemplateIamMember added in v0.26.0

func NewRegionWorkflowTemplateIamMember(ctx *pulumi.Context,
	name string, args *RegionWorkflowTemplateIamMemberArgs, opts ...pulumi.ResourceOption) (*RegionWorkflowTemplateIamMember, error)

NewRegionWorkflowTemplateIamMember registers a new resource with the given unique name, arguments, and options.
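
Similarly, a hedged fragment granting a single principal a role on a workflow template; the template name, role, and service account below are placeholders:

	_, err := dataproc.NewRegionWorkflowTemplateIamMember(ctx, "template-runner", &dataproc.RegionWorkflowTemplateIamMemberArgs{
		// Placeholder template name and service account.
		Name:   pulumi.String("projects/my-project/regions/us-central1/workflowTemplates/my-template"),
		Role:   pulumi.String("roles/dataproc.editor"),
		Member: pulumi.String("serviceAccount:workflow-runner@my-project.iam.gserviceaccount.com"),
	})
	if err != nil {
		return err
	}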

func (*RegionWorkflowTemplateIamMember) ElementType added in v0.26.0

func (*RegionWorkflowTemplateIamMember) ToRegionWorkflowTemplateIamMemberOutput added in v0.26.0

func (i *RegionWorkflowTemplateIamMember) ToRegionWorkflowTemplateIamMemberOutput() RegionWorkflowTemplateIamMemberOutput

func (*RegionWorkflowTemplateIamMember) ToRegionWorkflowTemplateIamMemberOutputWithContext added in v0.26.0

func (i *RegionWorkflowTemplateIamMember) ToRegionWorkflowTemplateIamMemberOutputWithContext(ctx context.Context) RegionWorkflowTemplateIamMemberOutput

type RegionWorkflowTemplateIamMemberArgs added in v0.26.0

type RegionWorkflowTemplateIamMemberArgs struct {
	// An IAM Condition for a given binding.
	Condition iam.ConditionPtrInput
	// Identity that will be granted the privilege in role. The entry can have one of the following values:
	//
	//  * user:{emailid}: An email address that represents a specific Google account. For example, alice@gmail.com or joe@example.com.
	//  * serviceAccount:{emailid}: An email address that represents a service account. For example, my-other-app@appspot.gserviceaccount.com.
	//  * group:{emailid}: An email address that represents a Google group. For example, admins@example.com.
	//  * domain:{domain}: A G Suite domain (primary, instead of alias) name that represents all the users of that domain. For example, google.com or example.com.
	Member pulumi.StringInput
	// The name of the resource to manage IAM policies for.
	Name pulumi.StringInput
	// The role that should be applied.
	Role pulumi.StringInput
}

The set of arguments for constructing a RegionWorkflowTemplateIamMember resource.

func (RegionWorkflowTemplateIamMemberArgs) ElementType added in v0.26.0

type RegionWorkflowTemplateIamMemberInput added in v0.26.0

type RegionWorkflowTemplateIamMemberInput interface {
	pulumi.Input

	ToRegionWorkflowTemplateIamMemberOutput() RegionWorkflowTemplateIamMemberOutput
	ToRegionWorkflowTemplateIamMemberOutputWithContext(ctx context.Context) RegionWorkflowTemplateIamMemberOutput
}

type RegionWorkflowTemplateIamMemberOutput added in v0.26.0

type RegionWorkflowTemplateIamMemberOutput struct{ *pulumi.OutputState }

func (RegionWorkflowTemplateIamMemberOutput) Condition added in v0.26.0

An IAM Condition for a given binding. See https://cloud.google.com/iam/docs/conditions-overview for additional details.

func (RegionWorkflowTemplateIamMemberOutput) ElementType added in v0.26.0

func (RegionWorkflowTemplateIamMemberOutput) Etag added in v0.26.0

The etag of the resource's IAM policy.

func (RegionWorkflowTemplateIamMemberOutput) Member added in v0.26.0

Specifies the principals requesting access for a Google Cloud resource. members can have the following values:

* allUsers: A special identifier that represents anyone who is on the internet; with or without a Google account.
* allAuthenticatedUsers: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation.
* user:{emailid}: An email address that represents a specific Google account. For example, alice@example.com.
* serviceAccount:{emailid}: An email address that represents a Google service account. For example, my-other-app@appspot.gserviceaccount.com.
* serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]: An identifier for a Kubernetes service account (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, my-project.svc.id.goog[my-namespace/my-kubernetes-sa].
* group:{emailid}: An email address that represents a Google group. For example, admins@example.com.
* domain:{domain}: The G Suite domain (primary) that represents all the users of that domain. For example, google.com or example.com.
* deleted:user:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a user that has been recently deleted. For example, alice@example.com?uid=123456789012345678901. If the user is recovered, this value reverts to user:{emailid} and the recovered user retains the role in the binding.
* deleted:serviceAccount:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901. If the service account is undeleted, this value reverts to serviceAccount:{emailid} and the undeleted service account retains the role in the binding.
* deleted:group:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, admins@example.com?uid=123456789012345678901. If the group is recovered, this value reverts to group:{emailid} and the recovered group retains the role in the binding.

func (RegionWorkflowTemplateIamMemberOutput) Name added in v0.26.0

The name of the resource to manage IAM policies for.

func (RegionWorkflowTemplateIamMemberOutput) Project added in v0.26.0

The project in which the resource belongs. If it is not provided, a default will be supplied.

func (RegionWorkflowTemplateIamMemberOutput) Role added in v0.26.0

Role that is assigned to the list of members, or principals. For example, roles/viewer, roles/editor, or roles/owner.

func (RegionWorkflowTemplateIamMemberOutput) ToRegionWorkflowTemplateIamMemberOutput added in v0.26.0

func (o RegionWorkflowTemplateIamMemberOutput) ToRegionWorkflowTemplateIamMemberOutput() RegionWorkflowTemplateIamMemberOutput

func (RegionWorkflowTemplateIamMemberOutput) ToRegionWorkflowTemplateIamMemberOutputWithContext added in v0.26.0

func (o RegionWorkflowTemplateIamMemberOutput) ToRegionWorkflowTemplateIamMemberOutputWithContext(ctx context.Context) RegionWorkflowTemplateIamMemberOutput

type RegionWorkflowTemplateIamMemberState added in v0.26.0

type RegionWorkflowTemplateIamMemberState struct {
}

func (RegionWorkflowTemplateIamMemberState) ElementType added in v0.26.0

type RegionWorkflowTemplateIamPolicy

type RegionWorkflowTemplateIamPolicy struct {
	pulumi.CustomResourceState

	// Associates a list of members, or principals, with a role. Optionally, may specify a condition that determines how and when the bindings are applied. Each of the bindings must contain at least one principal.The bindings in a Policy can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the bindings grant 50 different roles to user:alice@example.com, and not to any other principal, then you can add another 1,450 principals to the bindings in the Policy.
	Bindings BindingResponseArrayOutput `pulumi:"bindings"`
	// etag is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the etag in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An etag is returned in the response to getIamPolicy, and systems are expected to put that etag in the request to setIamPolicy to ensure that their change will be applied to the same version of the policy.Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.
	Etag     pulumi.StringOutput `pulumi:"etag"`
	Project  pulumi.StringOutput `pulumi:"project"`
	RegionId pulumi.StringOutput `pulumi:"regionId"`
	// Specifies the format of the policy.Valid values are 0, 1, and 3. Requests that specify an invalid value are rejected.Any operation that affects conditional role bindings must specify version 3. This requirement applies to the following operations: Getting a policy that includes a conditional role binding Adding a conditional role binding to a policy Changing a conditional role binding in a policy Removing any role binding, with or without a condition, from a policy that includes conditionsImportant: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).
	Version            pulumi.IntOutput    `pulumi:"version"`
	WorkflowTemplateId pulumi.StringOutput `pulumi:"workflowTemplateId"`
}

Sets the access control policy on the specified resource. Replaces any existing policy. Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors. Note - this resource's API doesn't support deletion. When deleted, the resource will persist on Google Cloud even though it will be deleted from Pulumi state.

func GetRegionWorkflowTemplateIamPolicy

func GetRegionWorkflowTemplateIamPolicy(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *RegionWorkflowTemplateIamPolicyState, opts ...pulumi.ResourceOption) (*RegionWorkflowTemplateIamPolicy, error)

GetRegionWorkflowTemplateIamPolicy gets an existing RegionWorkflowTemplateIamPolicy resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewRegionWorkflowTemplateIamPolicy

func NewRegionWorkflowTemplateIamPolicy(ctx *pulumi.Context,
	name string, args *RegionWorkflowTemplateIamPolicyArgs, opts ...pulumi.ResourceOption) (*RegionWorkflowTemplateIamPolicy, error)

NewRegionWorkflowTemplateIamPolicy registers a new resource with the given unique name, arguments, and options.
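
A hedged fragment replacing the full policy on a workflow template; the region, template ID, and binding values are placeholders, and BindingArray/BindingArgs are the helpers behind BindingArrayInput as in the earlier sketch:

	_, err := dataproc.NewRegionWorkflowTemplateIamPolicy(ctx, "template-policy", &dataproc.RegionWorkflowTemplateIamPolicyArgs{
		// RegionId and WorkflowTemplateId are placeholders.
		RegionId:           pulumi.String("us-central1"),
		WorkflowTemplateId: pulumi.String("my-template"),
		Bindings: dataproc.BindingArray{
			&dataproc.BindingArgs{
				Role:    pulumi.String("roles/viewer"),
				Members: pulumi.StringArray{pulumi.String("user:alice@example.com")},
			},
		},
	})
	if err != nil {
		return err
	}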

func (*RegionWorkflowTemplateIamPolicy) ElementType

func (*RegionWorkflowTemplateIamPolicy) ToRegionWorkflowTemplateIamPolicyOutput

func (i *RegionWorkflowTemplateIamPolicy) ToRegionWorkflowTemplateIamPolicyOutput() RegionWorkflowTemplateIamPolicyOutput

func (*RegionWorkflowTemplateIamPolicy) ToRegionWorkflowTemplateIamPolicyOutputWithContext

func (i *RegionWorkflowTemplateIamPolicy) ToRegionWorkflowTemplateIamPolicyOutputWithContext(ctx context.Context) RegionWorkflowTemplateIamPolicyOutput

type RegionWorkflowTemplateIamPolicyArgs

type RegionWorkflowTemplateIamPolicyArgs struct {
	// Associates a list of members, or principals, with a role. Optionally, may specify a condition that determines how and when the bindings are applied. Each of the bindings must contain at least one principal.The bindings in a Policy can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the bindings grant 50 different roles to user:alice@example.com, and not to any other principal, then you can add another 1,450 principals to the bindings in the Policy.
	Bindings BindingArrayInput
	// etag is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the etag in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An etag is returned in the response to getIamPolicy, and systems are expected to put that etag in the request to setIamPolicy to ensure that their change will be applied to the same version of the policy.Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.
	Etag     pulumi.StringPtrInput
	Project  pulumi.StringPtrInput
	RegionId pulumi.StringInput
	// Specifies the format of the policy.Valid values are 0, 1, and 3. Requests that specify an invalid value are rejected.Any operation that affects conditional role bindings must specify version 3. This requirement applies to the following operations: Getting a policy that includes a conditional role binding Adding a conditional role binding to a policy Changing a conditional role binding in a policy Removing any role binding, with or without a condition, from a policy that includes conditionsImportant: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).
	Version            pulumi.IntPtrInput
	WorkflowTemplateId pulumi.StringInput
}

The set of arguments for constructing a RegionWorkflowTemplateIamPolicy resource.

func (RegionWorkflowTemplateIamPolicyArgs) ElementType

type RegionWorkflowTemplateIamPolicyInput

type RegionWorkflowTemplateIamPolicyInput interface {
	pulumi.Input

	ToRegionWorkflowTemplateIamPolicyOutput() RegionWorkflowTemplateIamPolicyOutput
	ToRegionWorkflowTemplateIamPolicyOutputWithContext(ctx context.Context) RegionWorkflowTemplateIamPolicyOutput
}

type RegionWorkflowTemplateIamPolicyOutput

type RegionWorkflowTemplateIamPolicyOutput struct{ *pulumi.OutputState }

func (RegionWorkflowTemplateIamPolicyOutput) Bindings added in v0.19.0

Associates a list of members, or principals, with a role. Optionally, may specify a condition that determines how and when the bindings are applied. Each of the bindings must contain at least one principal. The bindings in a Policy can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the bindings grant 50 different roles to user:alice@example.com, and not to any other principal, then you can add another 1,450 principals to the bindings in the Policy.

func (RegionWorkflowTemplateIamPolicyOutput) ElementType

func (RegionWorkflowTemplateIamPolicyOutput) Etag added in v0.19.0

etag is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the etag in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An etag is returned in the response to getIamPolicy, and systems are expected to put that etag in the request to setIamPolicy to ensure that their change will be applied to the same version of the policy. Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.

func (RegionWorkflowTemplateIamPolicyOutput) Project added in v0.21.0

func (RegionWorkflowTemplateIamPolicyOutput) RegionId added in v0.21.0

func (RegionWorkflowTemplateIamPolicyOutput) ToRegionWorkflowTemplateIamPolicyOutput

func (o RegionWorkflowTemplateIamPolicyOutput) ToRegionWorkflowTemplateIamPolicyOutput() RegionWorkflowTemplateIamPolicyOutput

func (RegionWorkflowTemplateIamPolicyOutput) ToRegionWorkflowTemplateIamPolicyOutputWithContext

func (o RegionWorkflowTemplateIamPolicyOutput) ToRegionWorkflowTemplateIamPolicyOutputWithContext(ctx context.Context) RegionWorkflowTemplateIamPolicyOutput

func (RegionWorkflowTemplateIamPolicyOutput) Version added in v0.19.0

Specifies the format of the policy. Valid values are 0, 1, and 3. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version 3. This requirement applies to the following operations: getting a policy that includes a conditional role binding; adding a conditional role binding to a policy; changing a conditional role binding in a policy; and removing any role binding, with or without a condition, from a policy that includes conditions. Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).

func (RegionWorkflowTemplateIamPolicyOutput) WorkflowTemplateId added in v0.21.0

type RegionWorkflowTemplateIamPolicyState

type RegionWorkflowTemplateIamPolicyState struct {
}

func (RegionWorkflowTemplateIamPolicyState) ElementType

type RepositoryConfig added in v0.32.0

type RepositoryConfig struct {
	// Optional. Configuration for PyPi repository.
	PypiRepositoryConfig *PyPiRepositoryConfig `pulumi:"pypiRepositoryConfig"`
}

Configuration for dependency repositories

type RepositoryConfigArgs added in v0.32.0

type RepositoryConfigArgs struct {
	// Optional. Configuration for PyPi repository.
	PypiRepositoryConfig PyPiRepositoryConfigPtrInput `pulumi:"pypiRepositoryConfig"`
}

Configuration for dependency repositories

func (RepositoryConfigArgs) ElementType added in v0.32.0

func (RepositoryConfigArgs) ElementType() reflect.Type

func (RepositoryConfigArgs) ToRepositoryConfigOutput added in v0.32.0

func (i RepositoryConfigArgs) ToRepositoryConfigOutput() RepositoryConfigOutput

func (RepositoryConfigArgs) ToRepositoryConfigOutputWithContext added in v0.32.0

func (i RepositoryConfigArgs) ToRepositoryConfigOutputWithContext(ctx context.Context) RepositoryConfigOutput

func (RepositoryConfigArgs) ToRepositoryConfigPtrOutput added in v0.32.0

func (i RepositoryConfigArgs) ToRepositoryConfigPtrOutput() RepositoryConfigPtrOutput

func (RepositoryConfigArgs) ToRepositoryConfigPtrOutputWithContext added in v0.32.0

func (i RepositoryConfigArgs) ToRepositoryConfigPtrOutputWithContext(ctx context.Context) RepositoryConfigPtrOutput

type RepositoryConfigInput added in v0.32.0

type RepositoryConfigInput interface {
	pulumi.Input

	ToRepositoryConfigOutput() RepositoryConfigOutput
	ToRepositoryConfigOutputWithContext(context.Context) RepositoryConfigOutput
}

RepositoryConfigInput is an input type that accepts RepositoryConfigArgs and RepositoryConfigOutput values. You can construct a concrete instance of `RepositoryConfigInput` via:

RepositoryConfigArgs{...}
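
A hedged fragment constructing this input; the nested PyPiRepositoryConfigArgs type and its PypiRepository field are assumptions based on the optional PyPi repository field above, and the mirror URL is a placeholder:

	// PyPiRepositoryConfigArgs and its PypiRepository field are assumed from the nested
	// type referenced above; the mirror URL is a placeholder.
	var repoCfg dataproc.RepositoryConfigPtrInput = dataproc.RepositoryConfigArgs{
		PypiRepositoryConfig: dataproc.PyPiRepositoryConfigArgs{
			PypiRepository: pulumi.String("https://pypi.example.internal/simple"),
		},
	}
	_ = repoCfg // wire this into the enclosing config struct that accepts a RepositoryConfig(Ptr)Input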

type RepositoryConfigOutput added in v0.32.0

type RepositoryConfigOutput struct{ *pulumi.OutputState }

Configuration for dependency repositories

func (RepositoryConfigOutput) ElementType added in v0.32.0

func (RepositoryConfigOutput) ElementType() reflect.Type

func (RepositoryConfigOutput) PypiRepositoryConfig added in v0.32.0

func (o RepositoryConfigOutput) PypiRepositoryConfig() PyPiRepositoryConfigPtrOutput

Optional. Configuration for PyPi repository.

func (RepositoryConfigOutput) ToRepositoryConfigOutput added in v0.32.0

func (o RepositoryConfigOutput) ToRepositoryConfigOutput() RepositoryConfigOutput

func (RepositoryConfigOutput) ToRepositoryConfigOutputWithContext added in v0.32.0

func (o RepositoryConfigOutput) ToRepositoryConfigOutputWithContext(ctx context.Context) RepositoryConfigOutput

func (RepositoryConfigOutput) ToRepositoryConfigPtrOutput added in v0.32.0

func (o RepositoryConfigOutput) ToRepositoryConfigPtrOutput() RepositoryConfigPtrOutput

func (RepositoryConfigOutput) ToRepositoryConfigPtrOutputWithContext added in v0.32.0

func (o RepositoryConfigOutput) ToRepositoryConfigPtrOutputWithContext(ctx context.Context) RepositoryConfigPtrOutput

type RepositoryConfigPtrInput added in v0.32.0

type RepositoryConfigPtrInput interface {
	pulumi.Input

	ToRepositoryConfigPtrOutput() RepositoryConfigPtrOutput
	ToRepositoryConfigPtrOutputWithContext(context.Context) RepositoryConfigPtrOutput
}

RepositoryConfigPtrInput is an input type that accepts RepositoryConfigArgs, RepositoryConfigPtr and RepositoryConfigPtrOutput values. You can construct a concrete instance of `RepositoryConfigPtrInput` via:

        RepositoryConfigArgs{...}

or:

        nil

func RepositoryConfigPtr added in v0.32.0

func RepositoryConfigPtr(v *RepositoryConfigArgs) RepositoryConfigPtrInput

type RepositoryConfigPtrOutput added in v0.32.0

type RepositoryConfigPtrOutput struct{ *pulumi.OutputState }

func (RepositoryConfigPtrOutput) Elem added in v0.32.0

func (RepositoryConfigPtrOutput) ElementType added in v0.32.0

func (RepositoryConfigPtrOutput) ElementType() reflect.Type

func (RepositoryConfigPtrOutput) PypiRepositoryConfig added in v0.32.0

Optional. Configuration for PyPi repository.

func (RepositoryConfigPtrOutput) ToRepositoryConfigPtrOutput added in v0.32.0

func (o RepositoryConfigPtrOutput) ToRepositoryConfigPtrOutput() RepositoryConfigPtrOutput

func (RepositoryConfigPtrOutput) ToRepositoryConfigPtrOutputWithContext added in v0.32.0

func (o RepositoryConfigPtrOutput) ToRepositoryConfigPtrOutputWithContext(ctx context.Context) RepositoryConfigPtrOutput

type RepositoryConfigResponse added in v0.32.0

type RepositoryConfigResponse struct {
	// Optional. Configuration for PyPi repository.
	PypiRepositoryConfig PyPiRepositoryConfigResponse `pulumi:"pypiRepositoryConfig"`
}

Configuration for dependency repositories

type RepositoryConfigResponseOutput added in v0.32.0

type RepositoryConfigResponseOutput struct{ *pulumi.OutputState }

Configuration for dependency repositories

func (RepositoryConfigResponseOutput) ElementType added in v0.32.0

func (RepositoryConfigResponseOutput) PypiRepositoryConfig added in v0.32.0

Optional. Configuration for PyPi repository.

func (RepositoryConfigResponseOutput) ToRepositoryConfigResponseOutput added in v0.32.0

func (o RepositoryConfigResponseOutput) ToRepositoryConfigResponseOutput() RepositoryConfigResponseOutput

func (RepositoryConfigResponseOutput) ToRepositoryConfigResponseOutputWithContext added in v0.32.0

func (o RepositoryConfigResponseOutput) ToRepositoryConfigResponseOutputWithContext(ctx context.Context) RepositoryConfigResponseOutput

type ReservationAffinity

type ReservationAffinity struct {
	// Optional. Type of reservation to consume
	ConsumeReservationType *ReservationAffinityConsumeReservationType `pulumi:"consumeReservationType"`
	// Optional. Corresponds to the label key of reservation resource.
	Key *string `pulumi:"key"`
	// Optional. Corresponds to the label values of reservation resource.
	Values []string `pulumi:"values"`
}

Reservation Affinity for consuming Zonal reservation.

type ReservationAffinityArgs

type ReservationAffinityArgs struct {
	// Optional. Type of reservation to consume
	ConsumeReservationType ReservationAffinityConsumeReservationTypePtrInput `pulumi:"consumeReservationType"`
	// Optional. Corresponds to the label key of reservation resource.
	Key pulumi.StringPtrInput `pulumi:"key"`
	// Optional. Corresponds to the label values of reservation resource.
	Values pulumi.StringArrayInput `pulumi:"values"`
}

Reservation Affinity for consuming Zonal reservation.
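
A hedged fragment targeting a specific zonal reservation; the enum string, label key, and reservation name are illustrative assumptions rather than values taken from this documentation:

	// The enum string, the reservation label key, and the reservation name below are
	// assumptions for illustration; see the package constants for the accepted enum values.
	affinity := dataproc.ReservationAffinityArgs{
		ConsumeReservationType: dataproc.ReservationAffinityConsumeReservationTypePtr("SPECIFIC_RESERVATION"),
		Key:                    pulumi.String("compute.googleapis.com/reservation-name"),
		Values:                 pulumi.StringArray{pulumi.String("my-reservation")},
	}
	_ = affinity // set on the instance group config field that accepts a ReservationAffinity(Ptr)Input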

func (ReservationAffinityArgs) ElementType

func (ReservationAffinityArgs) ElementType() reflect.Type

func (ReservationAffinityArgs) ToReservationAffinityOutput

func (i ReservationAffinityArgs) ToReservationAffinityOutput() ReservationAffinityOutput

func (ReservationAffinityArgs) ToReservationAffinityOutputWithContext

func (i ReservationAffinityArgs) ToReservationAffinityOutputWithContext(ctx context.Context) ReservationAffinityOutput

func (ReservationAffinityArgs) ToReservationAffinityPtrOutput

func (i ReservationAffinityArgs) ToReservationAffinityPtrOutput() ReservationAffinityPtrOutput

func (ReservationAffinityArgs) ToReservationAffinityPtrOutputWithContext

func (i ReservationAffinityArgs) ToReservationAffinityPtrOutputWithContext(ctx context.Context) ReservationAffinityPtrOutput

type ReservationAffinityConsumeReservationType added in v0.4.0

type ReservationAffinityConsumeReservationType string

Optional. Type of reservation to consume

func (ReservationAffinityConsumeReservationType) ElementType added in v0.4.0

func (ReservationAffinityConsumeReservationType) ToReservationAffinityConsumeReservationTypeOutput added in v0.6.0

func (e ReservationAffinityConsumeReservationType) ToReservationAffinityConsumeReservationTypeOutput() ReservationAffinityConsumeReservationTypeOutput

func (ReservationAffinityConsumeReservationType) ToReservationAffinityConsumeReservationTypeOutputWithContext added in v0.6.0

func (e ReservationAffinityConsumeReservationType) ToReservationAffinityConsumeReservationTypeOutputWithContext(ctx context.Context) ReservationAffinityConsumeReservationTypeOutput

func (ReservationAffinityConsumeReservationType) ToReservationAffinityConsumeReservationTypePtrOutput added in v0.6.0

func (e ReservationAffinityConsumeReservationType) ToReservationAffinityConsumeReservationTypePtrOutput() ReservationAffinityConsumeReservationTypePtrOutput

func (ReservationAffinityConsumeReservationType) ToReservationAffinityConsumeReservationTypePtrOutputWithContext added in v0.6.0

func (e ReservationAffinityConsumeReservationType) ToReservationAffinityConsumeReservationTypePtrOutputWithContext(ctx context.Context) ReservationAffinityConsumeReservationTypePtrOutput

func (ReservationAffinityConsumeReservationType) ToStringOutput added in v0.4.0

func (ReservationAffinityConsumeReservationType) ToStringOutputWithContext added in v0.4.0

func (ReservationAffinityConsumeReservationType) ToStringPtrOutput added in v0.4.0

func (ReservationAffinityConsumeReservationType) ToStringPtrOutputWithContext added in v0.4.0

type ReservationAffinityConsumeReservationTypeInput added in v0.6.0

type ReservationAffinityConsumeReservationTypeInput interface {
	pulumi.Input

	ToReservationAffinityConsumeReservationTypeOutput() ReservationAffinityConsumeReservationTypeOutput
	ToReservationAffinityConsumeReservationTypeOutputWithContext(context.Context) ReservationAffinityConsumeReservationTypeOutput
}

ReservationAffinityConsumeReservationTypeInput is an input type that accepts ReservationAffinityConsumeReservationTypeArgs and ReservationAffinityConsumeReservationTypeOutput values. You can construct a concrete instance of `ReservationAffinityConsumeReservationTypeInput` via:

ReservationAffinityConsumeReservationTypeArgs{...}

type ReservationAffinityConsumeReservationTypeOutput added in v0.6.0

type ReservationAffinityConsumeReservationTypeOutput struct{ *pulumi.OutputState }

func (ReservationAffinityConsumeReservationTypeOutput) ElementType added in v0.6.0

func (ReservationAffinityConsumeReservationTypeOutput) ToReservationAffinityConsumeReservationTypeOutput added in v0.6.0

func (o ReservationAffinityConsumeReservationTypeOutput) ToReservationAffinityConsumeReservationTypeOutput() ReservationAffinityConsumeReservationTypeOutput

func (ReservationAffinityConsumeReservationTypeOutput) ToReservationAffinityConsumeReservationTypeOutputWithContext added in v0.6.0

func (o ReservationAffinityConsumeReservationTypeOutput) ToReservationAffinityConsumeReservationTypeOutputWithContext(ctx context.Context) ReservationAffinityConsumeReservationTypeOutput

func (ReservationAffinityConsumeReservationTypeOutput) ToReservationAffinityConsumeReservationTypePtrOutput added in v0.6.0

func (o ReservationAffinityConsumeReservationTypeOutput) ToReservationAffinityConsumeReservationTypePtrOutput() ReservationAffinityConsumeReservationTypePtrOutput

func (ReservationAffinityConsumeReservationTypeOutput) ToReservationAffinityConsumeReservationTypePtrOutputWithContext added in v0.6.0

func (o ReservationAffinityConsumeReservationTypeOutput) ToReservationAffinityConsumeReservationTypePtrOutputWithContext(ctx context.Context) ReservationAffinityConsumeReservationTypePtrOutput

func (ReservationAffinityConsumeReservationTypeOutput) ToStringOutput added in v0.6.0

func (ReservationAffinityConsumeReservationTypeOutput) ToStringOutputWithContext added in v0.6.0

func (ReservationAffinityConsumeReservationTypeOutput) ToStringPtrOutput added in v0.6.0

func (ReservationAffinityConsumeReservationTypeOutput) ToStringPtrOutputWithContext added in v0.6.0

type ReservationAffinityConsumeReservationTypePtrInput added in v0.6.0

type ReservationAffinityConsumeReservationTypePtrInput interface {
	pulumi.Input

	ToReservationAffinityConsumeReservationTypePtrOutput() ReservationAffinityConsumeReservationTypePtrOutput
	ToReservationAffinityConsumeReservationTypePtrOutputWithContext(context.Context) ReservationAffinityConsumeReservationTypePtrOutput
}

func ReservationAffinityConsumeReservationTypePtr added in v0.6.0

func ReservationAffinityConsumeReservationTypePtr(v string) ReservationAffinityConsumeReservationTypePtrInput

type ReservationAffinityConsumeReservationTypePtrOutput added in v0.6.0

type ReservationAffinityConsumeReservationTypePtrOutput struct{ *pulumi.OutputState }

func (ReservationAffinityConsumeReservationTypePtrOutput) Elem added in v0.6.0

func (ReservationAffinityConsumeReservationTypePtrOutput) ElementType added in v0.6.0

func (ReservationAffinityConsumeReservationTypePtrOutput) ToReservationAffinityConsumeReservationTypePtrOutput added in v0.6.0

func (o ReservationAffinityConsumeReservationTypePtrOutput) ToReservationAffinityConsumeReservationTypePtrOutput() ReservationAffinityConsumeReservationTypePtrOutput

func (ReservationAffinityConsumeReservationTypePtrOutput) ToReservationAffinityConsumeReservationTypePtrOutputWithContext added in v0.6.0

func (o ReservationAffinityConsumeReservationTypePtrOutput) ToReservationAffinityConsumeReservationTypePtrOutputWithContext(ctx context.Context) ReservationAffinityConsumeReservationTypePtrOutput

func (ReservationAffinityConsumeReservationTypePtrOutput) ToStringPtrOutput added in v0.6.0

func (ReservationAffinityConsumeReservationTypePtrOutput) ToStringPtrOutputWithContext added in v0.6.0

type ReservationAffinityInput

type ReservationAffinityInput interface {
	pulumi.Input

	ToReservationAffinityOutput() ReservationAffinityOutput
	ToReservationAffinityOutputWithContext(context.Context) ReservationAffinityOutput
}

ReservationAffinityInput is an input type that accepts ReservationAffinityArgs and ReservationAffinityOutput values. You can construct a concrete instance of `ReservationAffinityInput` via:

ReservationAffinityArgs{...}

type ReservationAffinityOutput

type ReservationAffinityOutput struct{ *pulumi.OutputState }

Reservation Affinity for consuming Zonal reservation.

func (ReservationAffinityOutput) ConsumeReservationType

Optional. Type of reservation to consume.

func (ReservationAffinityOutput) ElementType

func (ReservationAffinityOutput) ElementType() reflect.Type

func (ReservationAffinityOutput) Key

Optional. Corresponds to the label key of the reservation resource.

func (ReservationAffinityOutput) ToReservationAffinityOutput

func (o ReservationAffinityOutput) ToReservationAffinityOutput() ReservationAffinityOutput

func (ReservationAffinityOutput) ToReservationAffinityOutputWithContext

func (o ReservationAffinityOutput) ToReservationAffinityOutputWithContext(ctx context.Context) ReservationAffinityOutput

func (ReservationAffinityOutput) ToReservationAffinityPtrOutput

func (o ReservationAffinityOutput) ToReservationAffinityPtrOutput() ReservationAffinityPtrOutput

func (ReservationAffinityOutput) ToReservationAffinityPtrOutputWithContext

func (o ReservationAffinityOutput) ToReservationAffinityPtrOutputWithContext(ctx context.Context) ReservationAffinityPtrOutput

func (ReservationAffinityOutput) Values

Optional. Corresponds to the label values of the reservation resource.

type ReservationAffinityPtrInput

type ReservationAffinityPtrInput interface {
	pulumi.Input

	ToReservationAffinityPtrOutput() ReservationAffinityPtrOutput
	ToReservationAffinityPtrOutputWithContext(context.Context) ReservationAffinityPtrOutput
}

ReservationAffinityPtrInput is an input type that accepts ReservationAffinityArgs, ReservationAffinityPtr and ReservationAffinityPtrOutput values. You can construct a concrete instance of `ReservationAffinityPtrInput` via:

        ReservationAffinityArgs{...}

or:

        nil
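
A minimal construction sketch (not part of the generated listing): it assumes this package is imported as `dataproc` from the pulumi-google-native Go SDK, and the enum string, label key, and reservation name are placeholders chosen only for illustration.

package main

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	// A ReservationAffinityArgs literal satisfies ReservationAffinityPtrInput
	// directly, so no explicit pointer wrapper is needed.
	var affinity dataproc.ReservationAffinityPtrInput = dataproc.ReservationAffinityArgs{
		// ReservationAffinityConsumeReservationTypePtr wraps the raw enum string.
		ConsumeReservationType: dataproc.ReservationAffinityConsumeReservationTypePtr("SPECIFIC_RESERVATION"),
		Key:                    pulumi.String("compute.googleapis.com/reservation-name"), // assumed label key
		Values:                 pulumi.StringArray{pulumi.String("my-reservation")},      // placeholder value
	}
	_ = affinity
}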

type ReservationAffinityPtrOutput

type ReservationAffinityPtrOutput struct{ *pulumi.OutputState }

func (ReservationAffinityPtrOutput) ConsumeReservationType

Optional. Type of reservation to consume.

func (ReservationAffinityPtrOutput) Elem

func (ReservationAffinityPtrOutput) ElementType

func (ReservationAffinityPtrOutput) Key

Optional. Corresponds to the label key of the reservation resource.

func (ReservationAffinityPtrOutput) ToReservationAffinityPtrOutput

func (o ReservationAffinityPtrOutput) ToReservationAffinityPtrOutput() ReservationAffinityPtrOutput

func (ReservationAffinityPtrOutput) ToReservationAffinityPtrOutputWithContext

func (o ReservationAffinityPtrOutput) ToReservationAffinityPtrOutputWithContext(ctx context.Context) ReservationAffinityPtrOutput

func (ReservationAffinityPtrOutput) Values

Optional. Corresponds to the label values of the reservation resource.

type ReservationAffinityResponse

type ReservationAffinityResponse struct {
	// Optional. Type of reservation to consume.
	ConsumeReservationType string `pulumi:"consumeReservationType"`
	// Optional. Corresponds to the label key of the reservation resource.
	Key string `pulumi:"key"`
	// Optional. Corresponds to the label values of the reservation resource.
	Values []string `pulumi:"values"`
}

Reservation Affinity for consuming Zonal reservation.

type ReservationAffinityResponseOutput

type ReservationAffinityResponseOutput struct{ *pulumi.OutputState }

Reservation Affinity for consuming Zonal reservation.

func (ReservationAffinityResponseOutput) ConsumeReservationType

func (o ReservationAffinityResponseOutput) ConsumeReservationType() pulumi.StringOutput

Optional. Type of reservation to consume.

func (ReservationAffinityResponseOutput) ElementType

func (ReservationAffinityResponseOutput) Key

Optional. Corresponds to the label key of the reservation resource.

func (ReservationAffinityResponseOutput) ToReservationAffinityResponseOutput

func (o ReservationAffinityResponseOutput) ToReservationAffinityResponseOutput() ReservationAffinityResponseOutput

func (ReservationAffinityResponseOutput) ToReservationAffinityResponseOutputWithContext

func (o ReservationAffinityResponseOutput) ToReservationAffinityResponseOutputWithContext(ctx context.Context) ReservationAffinityResponseOutput

func (ReservationAffinityResponseOutput) Values

Optional. Corresponds to the label values of the reservation resource.

type RuntimeConfig added in v0.12.0

type RuntimeConfig struct {
	// Optional. Custom container image for the job runtime environment. If not specified, a default container image will be used.
	ContainerImage *string `pulumi:"containerImage"`
	// Optional. A mapping of property names to values, which are used to configure workload execution.
	Properties map[string]string `pulumi:"properties"`
	// Optional. Dependency repository configuration.
	RepositoryConfig *RepositoryConfig `pulumi:"repositoryConfig"`
	// Optional. Version of the batch runtime.
	Version *string `pulumi:"version"`
}

Runtime configuration for a workload.

type RuntimeConfigArgs added in v0.12.0

type RuntimeConfigArgs struct {
	// Optional. Custom container image for the job runtime environment. If not specified, a default container image will be used.
	ContainerImage pulumi.StringPtrInput `pulumi:"containerImage"`
	// Optional. A mapping of property names to values, which are used to configure workload execution.
	Properties pulumi.StringMapInput `pulumi:"properties"`
	// Optional. Dependency repository configuration.
	RepositoryConfig RepositoryConfigPtrInput `pulumi:"repositoryConfig"`
	// Optional. Version of the batch runtime.
	Version pulumi.StringPtrInput `pulumi:"version"`
}

Runtime configuration for a workload.
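
A brief, hedged sketch of populating RuntimeConfigArgs; the import path/alias and all field values (runtime version, Spark property) are placeholders rather than recommendations.

package main

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	// Only Version and Properties are set; ContainerImage and RepositoryConfig
	// are optional and left unset here.
	rc := dataproc.RuntimeConfigArgs{
		Version: pulumi.String("2.1"), // placeholder batch runtime version
		Properties: pulumi.StringMap{
			"spark.executor.cores": pulumi.String("4"), // illustrative workload property
		},
	}
	_ = rc
}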

func (RuntimeConfigArgs) ElementType added in v0.12.0

func (RuntimeConfigArgs) ElementType() reflect.Type

func (RuntimeConfigArgs) ToRuntimeConfigOutput added in v0.12.0

func (i RuntimeConfigArgs) ToRuntimeConfigOutput() RuntimeConfigOutput

func (RuntimeConfigArgs) ToRuntimeConfigOutputWithContext added in v0.12.0

func (i RuntimeConfigArgs) ToRuntimeConfigOutputWithContext(ctx context.Context) RuntimeConfigOutput

func (RuntimeConfigArgs) ToRuntimeConfigPtrOutput added in v0.12.0

func (i RuntimeConfigArgs) ToRuntimeConfigPtrOutput() RuntimeConfigPtrOutput

func (RuntimeConfigArgs) ToRuntimeConfigPtrOutputWithContext added in v0.12.0

func (i RuntimeConfigArgs) ToRuntimeConfigPtrOutputWithContext(ctx context.Context) RuntimeConfigPtrOutput

type RuntimeConfigInput added in v0.12.0

type RuntimeConfigInput interface {
	pulumi.Input

	ToRuntimeConfigOutput() RuntimeConfigOutput
	ToRuntimeConfigOutputWithContext(context.Context) RuntimeConfigOutput
}

RuntimeConfigInput is an input type that accepts RuntimeConfigArgs and RuntimeConfigOutput values. You can construct a concrete instance of `RuntimeConfigInput` via:

RuntimeConfigArgs{...}

type RuntimeConfigOutput added in v0.12.0

type RuntimeConfigOutput struct{ *pulumi.OutputState }

Runtime configuration for a workload.

func (RuntimeConfigOutput) ContainerImage added in v0.12.0

func (o RuntimeConfigOutput) ContainerImage() pulumi.StringPtrOutput

Optional. Custom container image for the job runtime environment. If not specified, a default container image will be used.

func (RuntimeConfigOutput) ElementType added in v0.12.0

func (RuntimeConfigOutput) ElementType() reflect.Type

func (RuntimeConfigOutput) Properties added in v0.12.0

Optional. A mapping of property names to values, which are used to configure workload execution.

func (RuntimeConfigOutput) RepositoryConfig added in v0.32.0

func (o RuntimeConfigOutput) RepositoryConfig() RepositoryConfigPtrOutput

Optional. Dependency repository configuration.

func (RuntimeConfigOutput) ToRuntimeConfigOutput added in v0.12.0

func (o RuntimeConfigOutput) ToRuntimeConfigOutput() RuntimeConfigOutput

func (RuntimeConfigOutput) ToRuntimeConfigOutputWithContext added in v0.12.0

func (o RuntimeConfigOutput) ToRuntimeConfigOutputWithContext(ctx context.Context) RuntimeConfigOutput

func (RuntimeConfigOutput) ToRuntimeConfigPtrOutput added in v0.12.0

func (o RuntimeConfigOutput) ToRuntimeConfigPtrOutput() RuntimeConfigPtrOutput

func (RuntimeConfigOutput) ToRuntimeConfigPtrOutputWithContext added in v0.12.0

func (o RuntimeConfigOutput) ToRuntimeConfigPtrOutputWithContext(ctx context.Context) RuntimeConfigPtrOutput

func (RuntimeConfigOutput) Version added in v0.12.0

Optional. Version of the batch runtime.

type RuntimeConfigPtrInput added in v0.12.0

type RuntimeConfigPtrInput interface {
	pulumi.Input

	ToRuntimeConfigPtrOutput() RuntimeConfigPtrOutput
	ToRuntimeConfigPtrOutputWithContext(context.Context) RuntimeConfigPtrOutput
}

RuntimeConfigPtrInput is an input type that accepts RuntimeConfigArgs, RuntimeConfigPtr and RuntimeConfigPtrOutput values. You can construct a concrete instance of `RuntimeConfigPtrInput` via:

        RuntimeConfigArgs{...}

or:

        nil

func RuntimeConfigPtr added in v0.12.0

func RuntimeConfigPtr(v *RuntimeConfigArgs) RuntimeConfigPtrInput
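
Where a field is typed RuntimeConfigPtrInput, an args literal can also be wrapped explicitly with this helper. The fragment below continues the assumptions (and imports) of the RuntimeConfigArgs sketch above; the version string is a placeholder.

// RuntimeConfigPtr turns *RuntimeConfigArgs into a RuntimeConfigPtrInput.
cfgPtr := dataproc.RuntimeConfigPtr(&dataproc.RuntimeConfigArgs{
	Version: pulumi.String("2.1"), // placeholder
})
// Passing a RuntimeConfigArgs value directly also works, since it implements
// the same interface.
_ = cfgPtr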

type RuntimeConfigPtrOutput added in v0.12.0

type RuntimeConfigPtrOutput struct{ *pulumi.OutputState }

func (RuntimeConfigPtrOutput) ContainerImage added in v0.12.0

func (o RuntimeConfigPtrOutput) ContainerImage() pulumi.StringPtrOutput

Optional. Custom container image for the job runtime environment. If not specified, a default container image will be used.

func (RuntimeConfigPtrOutput) Elem added in v0.12.0

func (RuntimeConfigPtrOutput) ElementType added in v0.12.0

func (RuntimeConfigPtrOutput) ElementType() reflect.Type

func (RuntimeConfigPtrOutput) Properties added in v0.12.0

Optional. A mapping of property names to values, which are used to configure workload execution.

func (RuntimeConfigPtrOutput) RepositoryConfig added in v0.32.0

Optional. Dependency repository configuration.

func (RuntimeConfigPtrOutput) ToRuntimeConfigPtrOutput added in v0.12.0

func (o RuntimeConfigPtrOutput) ToRuntimeConfigPtrOutput() RuntimeConfigPtrOutput

func (RuntimeConfigPtrOutput) ToRuntimeConfigPtrOutputWithContext added in v0.12.0

func (o RuntimeConfigPtrOutput) ToRuntimeConfigPtrOutputWithContext(ctx context.Context) RuntimeConfigPtrOutput

func (RuntimeConfigPtrOutput) Version added in v0.12.0

Optional. Version of the batch runtime.

type RuntimeConfigResponse added in v0.12.0

type RuntimeConfigResponse struct {
	// Optional. Custom container image for the job runtime environment. If not specified, a default container image will be used.
	ContainerImage string `pulumi:"containerImage"`
	// Optional. A mapping of property names to values, which are used to configure workload execution.
	Properties map[string]string `pulumi:"properties"`
	// Optional. Dependency repository configuration.
	RepositoryConfig RepositoryConfigResponse `pulumi:"repositoryConfig"`
	// Optional. Version of the batch runtime.
	Version string `pulumi:"version"`
}

Runtime configuration for a workload.

type RuntimeConfigResponseOutput added in v0.12.0

type RuntimeConfigResponseOutput struct{ *pulumi.OutputState }

Runtime configuration for a workload.

func (RuntimeConfigResponseOutput) ContainerImage added in v0.12.0

func (o RuntimeConfigResponseOutput) ContainerImage() pulumi.StringOutput

Optional. Custom container image for the job runtime environment. If not specified, a default container image will be used.

func (RuntimeConfigResponseOutput) ElementType added in v0.12.0

func (RuntimeConfigResponseOutput) Properties added in v0.12.0

Optional. A mapping of property names to values, which are used to configure workload execution.

func (RuntimeConfigResponseOutput) RepositoryConfig added in v0.32.0

Optional. Dependency repository configuration.

func (RuntimeConfigResponseOutput) ToRuntimeConfigResponseOutput added in v0.12.0

func (o RuntimeConfigResponseOutput) ToRuntimeConfigResponseOutput() RuntimeConfigResponseOutput

func (RuntimeConfigResponseOutput) ToRuntimeConfigResponseOutputWithContext added in v0.12.0

func (o RuntimeConfigResponseOutput) ToRuntimeConfigResponseOutputWithContext(ctx context.Context) RuntimeConfigResponseOutput

func (RuntimeConfigResponseOutput) Version added in v0.12.0

Optional. Version of the batch runtime.

type RuntimeInfoResponse added in v0.12.0

type RuntimeInfoResponse struct {
	// Approximate workload resource usage, calculated when the workload completes (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). Note: This metric calculation may change in the future, for example, to capture cumulative workload resource consumption during workload execution (see the Dataproc Serverless release notes (https://cloud.google.com/dataproc-serverless/docs/release-notes) for announcements, changes, fixes and other Dataproc developments).
	ApproximateUsage UsageMetricsResponse `pulumi:"approximateUsage"`
	// Snapshot of current workload resource usage.
	CurrentUsage UsageSnapshotResponse `pulumi:"currentUsage"`
	// A URI pointing to the location of the diagnostics tarball.
	DiagnosticOutputUri string `pulumi:"diagnosticOutputUri"`
	// Map of remote access endpoints (such as web interfaces and APIs) to their URIs.
	Endpoints map[string]string `pulumi:"endpoints"`
	// A URI pointing to the location of the stdout and stderr of the workload.
	OutputUri string `pulumi:"outputUri"`
}

Runtime information about workload execution.

type RuntimeInfoResponseOutput added in v0.12.0

type RuntimeInfoResponseOutput struct{ *pulumi.OutputState }

Runtime information about workload execution.

func (RuntimeInfoResponseOutput) ApproximateUsage added in v0.28.0

Approximate workload resource usage, calculated when the workload completes (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). Note: This metric calculation may change in the future, for example, to capture cumulative workload resource consumption during workload execution (see the Dataproc Serverless release notes (https://cloud.google.com/dataproc-serverless/docs/release-notes) for announcements, changes, fixes and other Dataproc developments).

func (RuntimeInfoResponseOutput) CurrentUsage added in v0.28.0

Snapshot of current workload resource usage.

func (RuntimeInfoResponseOutput) DiagnosticOutputUri added in v0.12.0

func (o RuntimeInfoResponseOutput) DiagnosticOutputUri() pulumi.StringOutput

A URI pointing to the location of the diagnostics tarball.

func (RuntimeInfoResponseOutput) ElementType added in v0.12.0

func (RuntimeInfoResponseOutput) ElementType() reflect.Type

func (RuntimeInfoResponseOutput) Endpoints added in v0.12.0

Map of remote access endpoints (such as web interfaces and APIs) to their URIs.

func (RuntimeInfoResponseOutput) OutputUri added in v0.12.0

A URI pointing to the location of the stdout and stderr of the workload.

func (RuntimeInfoResponseOutput) ToRuntimeInfoResponseOutput added in v0.12.0

func (o RuntimeInfoResponseOutput) ToRuntimeInfoResponseOutput() RuntimeInfoResponseOutput

func (RuntimeInfoResponseOutput) ToRuntimeInfoResponseOutputWithContext added in v0.12.0

func (o RuntimeInfoResponseOutput) ToRuntimeInfoResponseOutputWithContext(ctx context.Context) RuntimeInfoResponseOutput

type SecurityConfig

type SecurityConfig struct {
	// Optional. Identity related configuration, including service account based secure multi-tenancy user mappings.
	IdentityConfig *IdentityConfig `pulumi:"identityConfig"`
	// Optional. Kerberos related configuration.
	KerberosConfig *KerberosConfig `pulumi:"kerberosConfig"`
}

Security related configuration, including encryption, Kerberos, etc.

type SecurityConfigArgs

type SecurityConfigArgs struct {
	// Optional. Identity related configuration, including service account based secure multi-tenancy user mappings.
	IdentityConfig IdentityConfigPtrInput `pulumi:"identityConfig"`
	// Optional. Kerberos related configuration.
	KerberosConfig KerberosConfigPtrInput `pulumi:"kerberosConfig"`
}

Security related configuration, including encryption, Kerberos, etc.
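
A hedged sketch of a SecurityConfigArgs literal. The nested EnableKerberos field is an assumption about this SDK's generated KerberosConfigArgs (it mirrors the API's enableKerberos flag); check the KerberosConfig types documented elsewhere in this package before relying on it.

package main

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	sec := dataproc.SecurityConfigArgs{
		// Assumed field name on KerberosConfigArgs; IdentityConfig is left unset.
		KerberosConfig: &dataproc.KerberosConfigArgs{
			EnableKerberos: pulumi.Bool(true),
		},
	}
	_ = sec
}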

func (SecurityConfigArgs) ElementType

func (SecurityConfigArgs) ElementType() reflect.Type

func (SecurityConfigArgs) ToSecurityConfigOutput

func (i SecurityConfigArgs) ToSecurityConfigOutput() SecurityConfigOutput

func (SecurityConfigArgs) ToSecurityConfigOutputWithContext

func (i SecurityConfigArgs) ToSecurityConfigOutputWithContext(ctx context.Context) SecurityConfigOutput

func (SecurityConfigArgs) ToSecurityConfigPtrOutput

func (i SecurityConfigArgs) ToSecurityConfigPtrOutput() SecurityConfigPtrOutput

func (SecurityConfigArgs) ToSecurityConfigPtrOutputWithContext

func (i SecurityConfigArgs) ToSecurityConfigPtrOutputWithContext(ctx context.Context) SecurityConfigPtrOutput

type SecurityConfigInput

type SecurityConfigInput interface {
	pulumi.Input

	ToSecurityConfigOutput() SecurityConfigOutput
	ToSecurityConfigOutputWithContext(context.Context) SecurityConfigOutput
}

SecurityConfigInput is an input type that accepts SecurityConfigArgs and SecurityConfigOutput values. You can construct a concrete instance of `SecurityConfigInput` via:

SecurityConfigArgs{...}

type SecurityConfigOutput

type SecurityConfigOutput struct{ *pulumi.OutputState }

Security related configuration, including encryption, Kerberos, etc.

func (SecurityConfigOutput) ElementType

func (SecurityConfigOutput) ElementType() reflect.Type

func (SecurityConfigOutput) IdentityConfig

func (o SecurityConfigOutput) IdentityConfig() IdentityConfigPtrOutput

Optional. Identity related configuration, including service account based secure multi-tenancy user mappings.

func (SecurityConfigOutput) KerberosConfig

func (o SecurityConfigOutput) KerberosConfig() KerberosConfigPtrOutput

Optional. Kerberos related configuration.

func (SecurityConfigOutput) ToSecurityConfigOutput

func (o SecurityConfigOutput) ToSecurityConfigOutput() SecurityConfigOutput

func (SecurityConfigOutput) ToSecurityConfigOutputWithContext

func (o SecurityConfigOutput) ToSecurityConfigOutputWithContext(ctx context.Context) SecurityConfigOutput

func (SecurityConfigOutput) ToSecurityConfigPtrOutput

func (o SecurityConfigOutput) ToSecurityConfigPtrOutput() SecurityConfigPtrOutput

func (SecurityConfigOutput) ToSecurityConfigPtrOutputWithContext

func (o SecurityConfigOutput) ToSecurityConfigPtrOutputWithContext(ctx context.Context) SecurityConfigPtrOutput

type SecurityConfigPtrInput

type SecurityConfigPtrInput interface {
	pulumi.Input

	ToSecurityConfigPtrOutput() SecurityConfigPtrOutput
	ToSecurityConfigPtrOutputWithContext(context.Context) SecurityConfigPtrOutput
}

SecurityConfigPtrInput is an input type that accepts SecurityConfigArgs, SecurityConfigPtr and SecurityConfigPtrOutput values. You can construct a concrete instance of `SecurityConfigPtrInput` via:

        SecurityConfigArgs{...}

or:

        nil

type SecurityConfigPtrOutput

type SecurityConfigPtrOutput struct{ *pulumi.OutputState }

func (SecurityConfigPtrOutput) Elem

func (SecurityConfigPtrOutput) ElementType

func (SecurityConfigPtrOutput) ElementType() reflect.Type

func (SecurityConfigPtrOutput) IdentityConfig

Optional. Identity related configuration, including service account based secure multi-tenancy user mappings.

func (SecurityConfigPtrOutput) KerberosConfig

Optional. Kerberos related configuration.

func (SecurityConfigPtrOutput) ToSecurityConfigPtrOutput

func (o SecurityConfigPtrOutput) ToSecurityConfigPtrOutput() SecurityConfigPtrOutput

func (SecurityConfigPtrOutput) ToSecurityConfigPtrOutputWithContext

func (o SecurityConfigPtrOutput) ToSecurityConfigPtrOutputWithContext(ctx context.Context) SecurityConfigPtrOutput

type SecurityConfigResponse

type SecurityConfigResponse struct {
	// Optional. Identity related configuration, including service account based secure multi-tenancy user mappings.
	IdentityConfig IdentityConfigResponse `pulumi:"identityConfig"`
	// Optional. Kerberos related configuration.
	KerberosConfig KerberosConfigResponse `pulumi:"kerberosConfig"`
}

Security related configuration, including encryption, Kerberos, etc.

type SecurityConfigResponseOutput

type SecurityConfigResponseOutput struct{ *pulumi.OutputState }

Security related configuration, including encryption, Kerberos, etc.

func (SecurityConfigResponseOutput) ElementType

func (SecurityConfigResponseOutput) IdentityConfig

Optional. Identity related configuration, including service account based secure multi-tenancy user mappings.

func (SecurityConfigResponseOutput) KerberosConfig

Optional. Kerberos related configuration.

func (SecurityConfigResponseOutput) ToSecurityConfigResponseOutput

func (o SecurityConfigResponseOutput) ToSecurityConfigResponseOutput() SecurityConfigResponseOutput

func (SecurityConfigResponseOutput) ToSecurityConfigResponseOutputWithContext

func (o SecurityConfigResponseOutput) ToSecurityConfigResponseOutputWithContext(ctx context.Context) SecurityConfigResponseOutput

type Session added in v0.32.0

type Session struct {
	pulumi.CustomResourceState

	// The time when the session was created.
	CreateTime pulumi.StringOutput `pulumi:"createTime"`
	// The email address of the user who created the session.
	Creator pulumi.StringOutput `pulumi:"creator"`
	// Optional. Environment configuration for the session execution.
	EnvironmentConfig EnvironmentConfigResponseOutput `pulumi:"environmentConfig"`
	// Optional. Jupyter session config.
	JupyterSession JupyterConfigResponseOutput `pulumi:"jupyterSession"`
	// Optional. The labels to associate with the session. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a session.
	Labels   pulumi.StringMapOutput `pulumi:"labels"`
	Location pulumi.StringOutput    `pulumi:"location"`
	// The resource name of the session.
	Name    pulumi.StringOutput `pulumi:"name"`
	Project pulumi.StringOutput `pulumi:"project"`
	// Optional. A unique ID used to identify the request. If the service receives two CreateSessionRequests (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateSessionRequest)s with the same ID, the second request is ignored, and the first Session is created and stored in the backend. Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier). The value must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.
	RequestId pulumi.StringPtrOutput `pulumi:"requestId"`
	// Optional. Runtime configuration for the session execution.
	RuntimeConfig RuntimeConfigResponseOutput `pulumi:"runtimeConfig"`
	// Runtime information about session execution.
	RuntimeInfo RuntimeInfoResponseOutput `pulumi:"runtimeInfo"`
	// Required. The ID to use for the session, which becomes the final component of the session's resource name. This value must be 4-63 characters. Valid characters are /a-z-/.
	SessionId pulumi.StringOutput `pulumi:"sessionId"`
	// Optional. The session template used by the session. Only resource names, including project ID and location, are valid. Example: * https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/sessionTemplates/[template_id] * projects/[project_id]/locations/[dataproc_region]/sessionTemplates/[template_id]. The template must be in the same project and Dataproc region as the session.
	SessionTemplate pulumi.StringOutput `pulumi:"sessionTemplate"`
	// The state of the session.
	State pulumi.StringOutput `pulumi:"state"`
	// Historical state information for the session.
	StateHistory SessionStateHistoryResponseArrayOutput `pulumi:"stateHistory"`
	// Session state details, such as the failure description if the state is FAILED.
	StateMessage pulumi.StringOutput `pulumi:"stateMessage"`
	// The time when the session entered the current state.
	StateTime pulumi.StringOutput `pulumi:"stateTime"`
	// Optional. The email address of the user who owns the session.
	User pulumi.StringOutput `pulumi:"user"`
	// A session UUID (Unique Universal Identifier). The service generates this value when it creates the session.
	Uuid pulumi.StringOutput `pulumi:"uuid"`
}

Session creates an interactive session asynchronously.

func GetSession added in v0.32.0

func GetSession(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *SessionState, opts ...pulumi.ResourceOption) (*Session, error)

GetSession gets an existing Session resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewSession added in v0.32.0

func NewSession(ctx *pulumi.Context,
	name string, args *SessionArgs, opts ...pulumi.ResourceOption) (*Session, error)

NewSession registers a new resource with the given unique name, arguments, and options.
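
A minimal, hedged program that registers a Session. The resource and argument names come from this package's listing; the import path, region, session ID, runtime version, and label are placeholders.

package main

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		sess, err := dataproc.NewSession(ctx, "interactive", &dataproc.SessionArgs{
			SessionId: pulumi.String("my-spark-session"), // placeholder; 4-63 chars, /a-z-/
			Location:  pulumi.String("us-central1"),      // placeholder region
			RuntimeConfig: &dataproc.RuntimeConfigArgs{
				Version: pulumi.String("2.1"), // placeholder runtime version
			},
			Labels: pulumi.StringMap{"env": pulumi.String("dev")},
		})
		if err != nil {
			return err
		}
		// Export the server-generated session UUID.
		ctx.Export("sessionUuid", sess.Uuid)
		return nil
	})
}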

func (*Session) ElementType added in v0.32.0

func (*Session) ElementType() reflect.Type

func (*Session) ToSessionOutput added in v0.32.0

func (i *Session) ToSessionOutput() SessionOutput

func (*Session) ToSessionOutputWithContext added in v0.32.0

func (i *Session) ToSessionOutputWithContext(ctx context.Context) SessionOutput

type SessionArgs added in v0.32.0

type SessionArgs struct {
	// Optional. Environment configuration for the session execution.
	EnvironmentConfig EnvironmentConfigPtrInput
	// Optional. Jupyter session config.
	JupyterSession JupyterConfigPtrInput
	// Optional. The labels to associate with the session. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a session.
	Labels   pulumi.StringMapInput
	Location pulumi.StringPtrInput
	// The resource name of the session.
	Name    pulumi.StringPtrInput
	Project pulumi.StringPtrInput
	// Optional. A unique ID used to identify the request. If the service receives two CreateSessionRequests (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateSessionRequest)s with the same ID, the second request is ignored, and the first Session is created and stored in the backend. Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier). The value must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.
	RequestId pulumi.StringPtrInput
	// Optional. Runtime configuration for the session execution.
	RuntimeConfig RuntimeConfigPtrInput
	// Required. The ID to use for the session, which becomes the final component of the session's resource name. This value must be 4-63 characters. Valid characters are /a-z-/.
	SessionId pulumi.StringInput
	// Optional. The session template used by the session. Only resource names, including project ID and location, are valid. Example: * https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/sessionTemplates/[template_id] * projects/[project_id]/locations/[dataproc_region]/sessionTemplates/[template_id]. The template must be in the same project and Dataproc region as the session.
	SessionTemplate pulumi.StringPtrInput
	// Optional. The email address of the user who owns the session.
	User pulumi.StringPtrInput
}

The set of arguments for constructing a Session resource.

func (SessionArgs) ElementType added in v0.32.0

func (SessionArgs) ElementType() reflect.Type

type SessionInput added in v0.32.0

type SessionInput interface {
	pulumi.Input

	ToSessionOutput() SessionOutput
	ToSessionOutputWithContext(ctx context.Context) SessionOutput
}

type SessionOutput added in v0.32.0

type SessionOutput struct{ *pulumi.OutputState }

func (SessionOutput) CreateTime added in v0.32.0

func (o SessionOutput) CreateTime() pulumi.StringOutput

The time when the session was created.

func (SessionOutput) Creator added in v0.32.0

func (o SessionOutput) Creator() pulumi.StringOutput

The email address of the user who created the session.

func (SessionOutput) ElementType added in v0.32.0

func (SessionOutput) ElementType() reflect.Type

func (SessionOutput) EnvironmentConfig added in v0.32.0

func (o SessionOutput) EnvironmentConfig() EnvironmentConfigResponseOutput

Optional. Environment configuration for the session execution.

func (SessionOutput) JupyterSession added in v0.32.0

func (o SessionOutput) JupyterSession() JupyterConfigResponseOutput

Optional. Jupyter session config.

func (SessionOutput) Labels added in v0.32.0

Optional. The labels to associate with the session. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a session.

func (SessionOutput) Location added in v0.32.0

func (o SessionOutput) Location() pulumi.StringOutput

func (SessionOutput) Name added in v0.32.0

The resource name of the session.

func (SessionOutput) Project added in v0.32.0

func (o SessionOutput) Project() pulumi.StringOutput

func (SessionOutput) RequestId added in v0.32.0

func (o SessionOutput) RequestId() pulumi.StringPtrOutput

Optional. A unique ID used to identify the request. If the service receives two CreateSessionRequests (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateSessionRequest)s with the same ID, the second request is ignored, and the first Session is created and stored in the backend. Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier). The value must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.

func (SessionOutput) RuntimeConfig added in v0.32.0

func (o SessionOutput) RuntimeConfig() RuntimeConfigResponseOutput

Optional. Runtime configuration for the session execution.

func (SessionOutput) RuntimeInfo added in v0.32.0

func (o SessionOutput) RuntimeInfo() RuntimeInfoResponseOutput

Runtime information about session execution.

func (SessionOutput) SessionId added in v0.32.0

func (o SessionOutput) SessionId() pulumi.StringOutput

Required. The ID to use for the session, which becomes the final component of the session's resource name. This value must be 4-63 characters. Valid characters are /a-z-/.

func (SessionOutput) SessionTemplate added in v0.32.0

func (o SessionOutput) SessionTemplate() pulumi.StringOutput

Optional. The session template used by the session. Only resource names, including project ID and location, are valid. Example: * https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/sessionTemplates/[template_id] * projects/[project_id]/locations/[dataproc_region]/sessionTemplates/[template_id]. The template must be in the same project and Dataproc region as the session.

func (SessionOutput) State added in v0.32.0

func (o SessionOutput) State() pulumi.StringOutput

The state of the session.

func (SessionOutput) StateHistory added in v0.32.0

Historical state information for the session.

func (SessionOutput) StateMessage added in v0.32.0

func (o SessionOutput) StateMessage() pulumi.StringOutput

Session state details, such as the failure description if the state is FAILED.

func (SessionOutput) StateTime added in v0.32.0

func (o SessionOutput) StateTime() pulumi.StringOutput

The time when the session entered the current state.

func (SessionOutput) ToSessionOutput added in v0.32.0

func (o SessionOutput) ToSessionOutput() SessionOutput

func (SessionOutput) ToSessionOutputWithContext added in v0.32.0

func (o SessionOutput) ToSessionOutputWithContext(ctx context.Context) SessionOutput

func (SessionOutput) User added in v0.32.0

Optional. The email address of the user who owns the session.

func (SessionOutput) Uuid added in v0.32.0

A session UUID (Unique Universal Identifier). The service generates this value when it creates the session.

type SessionState added in v0.32.0

type SessionState struct {
}

func (SessionState) ElementType added in v0.32.0

func (SessionState) ElementType() reflect.Type

type SessionStateHistoryResponse added in v0.32.0

type SessionStateHistoryResponse struct {
	// The state of the session at this point in the session history.
	State string `pulumi:"state"`
	// Details about the state at this point in the session history.
	StateMessage string `pulumi:"stateMessage"`
	// The time when the session entered the historical state.
	StateStartTime string `pulumi:"stateStartTime"`
}

Historical state information.

type SessionStateHistoryResponseArrayOutput added in v0.32.0

type SessionStateHistoryResponseArrayOutput struct{ *pulumi.OutputState }

func (SessionStateHistoryResponseArrayOutput) ElementType added in v0.32.0

func (SessionStateHistoryResponseArrayOutput) Index added in v0.32.0

func (SessionStateHistoryResponseArrayOutput) ToSessionStateHistoryResponseArrayOutput added in v0.32.0

func (o SessionStateHistoryResponseArrayOutput) ToSessionStateHistoryResponseArrayOutput() SessionStateHistoryResponseArrayOutput

func (SessionStateHistoryResponseArrayOutput) ToSessionStateHistoryResponseArrayOutputWithContext added in v0.32.0

func (o SessionStateHistoryResponseArrayOutput) ToSessionStateHistoryResponseArrayOutputWithContext(ctx context.Context) SessionStateHistoryResponseArrayOutput

type SessionStateHistoryResponseOutput added in v0.32.0

type SessionStateHistoryResponseOutput struct{ *pulumi.OutputState }

Historical state information.

func (SessionStateHistoryResponseOutput) ElementType added in v0.32.0

func (SessionStateHistoryResponseOutput) State added in v0.32.0

The state of the session at this point in the session history.

func (SessionStateHistoryResponseOutput) StateMessage added in v0.32.0

Details about the state at this point in the session history.

func (SessionStateHistoryResponseOutput) StateStartTime added in v0.32.0

The time when the session entered the historical state.

func (SessionStateHistoryResponseOutput) ToSessionStateHistoryResponseOutput added in v0.32.0

func (o SessionStateHistoryResponseOutput) ToSessionStateHistoryResponseOutput() SessionStateHistoryResponseOutput

func (SessionStateHistoryResponseOutput) ToSessionStateHistoryResponseOutputWithContext added in v0.32.0

func (o SessionStateHistoryResponseOutput) ToSessionStateHistoryResponseOutputWithContext(ctx context.Context) SessionStateHistoryResponseOutput

type SessionTemplate added in v0.32.0

type SessionTemplate struct {
	pulumi.CustomResourceState

	// The time when the template was created.
	CreateTime pulumi.StringOutput `pulumi:"createTime"`
	// The email address of the user who created the template.
	Creator pulumi.StringOutput `pulumi:"creator"`
	// Optional. Brief description of the template.
	Description pulumi.StringOutput `pulumi:"description"`
	// Optional. Environment configuration for session execution.
	EnvironmentConfig EnvironmentConfigResponseOutput `pulumi:"environmentConfig"`
	// Optional. Jupyter session config.
	JupyterSession JupyterConfigResponseOutput `pulumi:"jupyterSession"`
	// Optional. Labels to associate with sessions created using this template. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty, but, if present, must contain 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a session.
	Labels   pulumi.StringMapOutput `pulumi:"labels"`
	Location pulumi.StringOutput    `pulumi:"location"`
	// The resource name of the session template.
	Name    pulumi.StringOutput `pulumi:"name"`
	Project pulumi.StringOutput `pulumi:"project"`
	// Optional. Runtime configuration for session execution.
	RuntimeConfig RuntimeConfigResponseOutput `pulumi:"runtimeConfig"`
	// The time the template was last updated.
	UpdateTime pulumi.StringOutput `pulumi:"updateTime"`
	// A session template UUID (Unique Universal Identifier). The service generates this value when it creates the session template.
	Uuid pulumi.StringOutput `pulumi:"uuid"`
}

SessionTemplate creates a session template synchronously.

func GetSessionTemplate added in v0.32.0

func GetSessionTemplate(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *SessionTemplateState, opts ...pulumi.ResourceOption) (*SessionTemplate, error)

GetSessionTemplate gets an existing SessionTemplate resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewSessionTemplate added in v0.32.0

func NewSessionTemplate(ctx *pulumi.Context,
	name string, args *SessionTemplateArgs, opts ...pulumi.ResourceOption) (*SessionTemplate, error)

NewSessionTemplate registers a new resource with the given unique name, arguments, and options.
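
Analogously, a hedged sketch that registers a SessionTemplate; every concrete value below (region, description, runtime version) is a placeholder, and the import path/alias is assumed as in the Session sketch above.

package main

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		tpl, err := dataproc.NewSessionTemplate(ctx, "spark-template", &dataproc.SessionTemplateArgs{
			Description: pulumi.String("Reusable interactive Spark session settings"),
			Location:    pulumi.String("us-central1"), // placeholder region
			RuntimeConfig: &dataproc.RuntimeConfigArgs{
				Version: pulumi.String("2.1"), // placeholder runtime version
			},
		})
		if err != nil {
			return err
		}
		ctx.Export("sessionTemplateName", tpl.Name)
		return nil
	})
}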

func (*SessionTemplate) ElementType added in v0.32.0

func (*SessionTemplate) ElementType() reflect.Type

func (*SessionTemplate) ToSessionTemplateOutput added in v0.32.0

func (i *SessionTemplate) ToSessionTemplateOutput() SessionTemplateOutput

func (*SessionTemplate) ToSessionTemplateOutputWithContext added in v0.32.0

func (i *SessionTemplate) ToSessionTemplateOutputWithContext(ctx context.Context) SessionTemplateOutput

type SessionTemplateArgs added in v0.32.0

type SessionTemplateArgs struct {
	// Optional. Brief description of the template.
	Description pulumi.StringPtrInput
	// Optional. Environment configuration for session execution.
	EnvironmentConfig EnvironmentConfigPtrInput
	// Optional. Jupyter session config.
	JupyterSession JupyterConfigPtrInput
	// Optional. Labels to associate with sessions created using this template. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty, but, if present, must contain 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a session.
	Labels   pulumi.StringMapInput
	Location pulumi.StringPtrInput
	// The resource name of the session template.
	Name    pulumi.StringPtrInput
	Project pulumi.StringPtrInput
	// Optional. Runtime configuration for session execution.
	RuntimeConfig RuntimeConfigPtrInput
}

The set of arguments for constructing a SessionTemplate resource.

func (SessionTemplateArgs) ElementType added in v0.32.0

func (SessionTemplateArgs) ElementType() reflect.Type

type SessionTemplateInput added in v0.32.0

type SessionTemplateInput interface {
	pulumi.Input

	ToSessionTemplateOutput() SessionTemplateOutput
	ToSessionTemplateOutputWithContext(ctx context.Context) SessionTemplateOutput
}

type SessionTemplateOutput added in v0.32.0

type SessionTemplateOutput struct{ *pulumi.OutputState }

func (SessionTemplateOutput) CreateTime added in v0.32.0

func (o SessionTemplateOutput) CreateTime() pulumi.StringOutput

The time when the template was created.

func (SessionTemplateOutput) Creator added in v0.32.0

The email address of the user who created the template.

func (SessionTemplateOutput) Description added in v0.32.0

func (o SessionTemplateOutput) Description() pulumi.StringOutput

Optional. Brief description of the template.

func (SessionTemplateOutput) ElementType added in v0.32.0

func (SessionTemplateOutput) ElementType() reflect.Type

func (SessionTemplateOutput) EnvironmentConfig added in v0.32.0

Optional. Environment configuration for session execution.

func (SessionTemplateOutput) JupyterSession added in v0.32.0

Optional. Jupyter session config.

func (SessionTemplateOutput) Labels added in v0.32.0

Optional. Labels to associate with sessions created using this template. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty, but, if present, must contain 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a session.

func (SessionTemplateOutput) Location added in v0.32.0

func (SessionTemplateOutput) Name added in v0.32.0

The resource name of the session template.

func (SessionTemplateOutput) Project added in v0.32.0

func (SessionTemplateOutput) RuntimeConfig added in v0.32.0

Optional. Runtime configuration for session execution.

func (SessionTemplateOutput) ToSessionTemplateOutput added in v0.32.0

func (o SessionTemplateOutput) ToSessionTemplateOutput() SessionTemplateOutput

func (SessionTemplateOutput) ToSessionTemplateOutputWithContext added in v0.32.0

func (o SessionTemplateOutput) ToSessionTemplateOutputWithContext(ctx context.Context) SessionTemplateOutput

func (SessionTemplateOutput) UpdateTime added in v0.32.0

func (o SessionTemplateOutput) UpdateTime() pulumi.StringOutput

The time the template was last updated.

func (SessionTemplateOutput) Uuid added in v0.32.0

A session template UUID (Unique Universal Identifier). The service generates this value when it creates the session template.

type SessionTemplateState added in v0.32.0

type SessionTemplateState struct {
}

func (SessionTemplateState) ElementType added in v0.32.0

func (SessionTemplateState) ElementType() reflect.Type

type ShieldedInstanceConfig

type ShieldedInstanceConfig struct {
	// Optional. Defines whether instances have integrity monitoring enabled.
	EnableIntegrityMonitoring *bool `pulumi:"enableIntegrityMonitoring"`
	// Optional. Defines whether instances have Secure Boot enabled.
	EnableSecureBoot *bool `pulumi:"enableSecureBoot"`
	// Optional. Defines whether instances have the vTPM enabled.
	EnableVtpm *bool `pulumi:"enableVtpm"`
}

Shielded Instance Config for clusters using Compute Engine Shielded VMs (https://cloud.google.com/security/shielded-cloud/shielded-vm).

type ShieldedInstanceConfigArgs

type ShieldedInstanceConfigArgs struct {
	// Optional. Defines whether instances have integrity monitoring enabled.
	EnableIntegrityMonitoring pulumi.BoolPtrInput `pulumi:"enableIntegrityMonitoring"`
	// Optional. Defines whether instances have Secure Boot enabled.
	EnableSecureBoot pulumi.BoolPtrInput `pulumi:"enableSecureBoot"`
	// Optional. Defines whether instances have the vTPM enabled.
	EnableVtpm pulumi.BoolPtrInput `pulumi:"enableVtpm"`
}

Shielded Instance Config for clusters using Compute Engine Shielded VMs (https://cloud.google.com/security/shielded-cloud/shielded-vm).
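
A short fragment showing a ShieldedInstanceConfigArgs literal, reusing the assumed `dataproc`/`pulumi` imports from the earlier sketches; enabling all three features is illustrative, not a recommendation.

shielded := dataproc.ShieldedInstanceConfigArgs{
	EnableSecureBoot:          pulumi.Bool(true),
	EnableVtpm:                pulumi.Bool(true),
	EnableIntegrityMonitoring: pulumi.Bool(true),
}
// The literal can be used wherever a ShieldedInstanceConfigInput or
// ShieldedInstanceConfigPtrInput is expected.
_ = shielded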

func (ShieldedInstanceConfigArgs) ElementType

func (ShieldedInstanceConfigArgs) ElementType() reflect.Type

func (ShieldedInstanceConfigArgs) ToShieldedInstanceConfigOutput

func (i ShieldedInstanceConfigArgs) ToShieldedInstanceConfigOutput() ShieldedInstanceConfigOutput

func (ShieldedInstanceConfigArgs) ToShieldedInstanceConfigOutputWithContext

func (i ShieldedInstanceConfigArgs) ToShieldedInstanceConfigOutputWithContext(ctx context.Context) ShieldedInstanceConfigOutput

func (ShieldedInstanceConfigArgs) ToShieldedInstanceConfigPtrOutput

func (i ShieldedInstanceConfigArgs) ToShieldedInstanceConfigPtrOutput() ShieldedInstanceConfigPtrOutput

func (ShieldedInstanceConfigArgs) ToShieldedInstanceConfigPtrOutputWithContext

func (i ShieldedInstanceConfigArgs) ToShieldedInstanceConfigPtrOutputWithContext(ctx context.Context) ShieldedInstanceConfigPtrOutput

type ShieldedInstanceConfigInput

type ShieldedInstanceConfigInput interface {
	pulumi.Input

	ToShieldedInstanceConfigOutput() ShieldedInstanceConfigOutput
	ToShieldedInstanceConfigOutputWithContext(context.Context) ShieldedInstanceConfigOutput
}

ShieldedInstanceConfigInput is an input type that accepts ShieldedInstanceConfigArgs and ShieldedInstanceConfigOutput values. You can construct a concrete instance of `ShieldedInstanceConfigInput` via:

ShieldedInstanceConfigArgs{...}

type ShieldedInstanceConfigOutput

type ShieldedInstanceConfigOutput struct{ *pulumi.OutputState }

Shielded Instance Config for clusters using Compute Engine Shielded VMs (https://cloud.google.com/security/shielded-cloud/shielded-vm).

func (ShieldedInstanceConfigOutput) ElementType

func (ShieldedInstanceConfigOutput) EnableIntegrityMonitoring

func (o ShieldedInstanceConfigOutput) EnableIntegrityMonitoring() pulumi.BoolPtrOutput

Optional. Defines whether instances have integrity monitoring enabled.

func (ShieldedInstanceConfigOutput) EnableSecureBoot

func (o ShieldedInstanceConfigOutput) EnableSecureBoot() pulumi.BoolPtrOutput

Optional. Defines whether instances have Secure Boot enabled.

func (ShieldedInstanceConfigOutput) EnableVtpm

Optional. Defines whether instances have the vTPM enabled.

func (ShieldedInstanceConfigOutput) ToShieldedInstanceConfigOutput

func (o ShieldedInstanceConfigOutput) ToShieldedInstanceConfigOutput() ShieldedInstanceConfigOutput

func (ShieldedInstanceConfigOutput) ToShieldedInstanceConfigOutputWithContext

func (o ShieldedInstanceConfigOutput) ToShieldedInstanceConfigOutputWithContext(ctx context.Context) ShieldedInstanceConfigOutput

func (ShieldedInstanceConfigOutput) ToShieldedInstanceConfigPtrOutput

func (o ShieldedInstanceConfigOutput) ToShieldedInstanceConfigPtrOutput() ShieldedInstanceConfigPtrOutput

func (ShieldedInstanceConfigOutput) ToShieldedInstanceConfigPtrOutputWithContext

func (o ShieldedInstanceConfigOutput) ToShieldedInstanceConfigPtrOutputWithContext(ctx context.Context) ShieldedInstanceConfigPtrOutput

type ShieldedInstanceConfigPtrInput

type ShieldedInstanceConfigPtrInput interface {
	pulumi.Input

	ToShieldedInstanceConfigPtrOutput() ShieldedInstanceConfigPtrOutput
	ToShieldedInstanceConfigPtrOutputWithContext(context.Context) ShieldedInstanceConfigPtrOutput
}

ShieldedInstanceConfigPtrInput is an input type that accepts ShieldedInstanceConfigArgs, ShieldedInstanceConfigPtr and ShieldedInstanceConfigPtrOutput values. You can construct a concrete instance of `ShieldedInstanceConfigPtrInput` via:

        ShieldedInstanceConfigArgs{...}

or:

        nil

type ShieldedInstanceConfigPtrOutput

type ShieldedInstanceConfigPtrOutput struct{ *pulumi.OutputState }

func (ShieldedInstanceConfigPtrOutput) Elem

func (ShieldedInstanceConfigPtrOutput) ElementType

func (ShieldedInstanceConfigPtrOutput) EnableIntegrityMonitoring

func (o ShieldedInstanceConfigPtrOutput) EnableIntegrityMonitoring() pulumi.BoolPtrOutput

Optional. Defines whether instances have integrity monitoring enabled.

func (ShieldedInstanceConfigPtrOutput) EnableSecureBoot

Optional. Defines whether instances have Secure Boot enabled.

func (ShieldedInstanceConfigPtrOutput) EnableVtpm

Optional. Defines whether instances have the vTPM enabled.

func (ShieldedInstanceConfigPtrOutput) ToShieldedInstanceConfigPtrOutput

func (o ShieldedInstanceConfigPtrOutput) ToShieldedInstanceConfigPtrOutput() ShieldedInstanceConfigPtrOutput

func (ShieldedInstanceConfigPtrOutput) ToShieldedInstanceConfigPtrOutputWithContext

func (o ShieldedInstanceConfigPtrOutput) ToShieldedInstanceConfigPtrOutputWithContext(ctx context.Context) ShieldedInstanceConfigPtrOutput

type ShieldedInstanceConfigResponse

type ShieldedInstanceConfigResponse struct {
	// Optional. Defines whether instances have integrity monitoring enabled.
	EnableIntegrityMonitoring bool `pulumi:"enableIntegrityMonitoring"`
	// Optional. Defines whether instances have Secure Boot enabled.
	EnableSecureBoot bool `pulumi:"enableSecureBoot"`
	// Optional. Defines whether instances have the vTPM enabled.
	EnableVtpm bool `pulumi:"enableVtpm"`
}

Shielded Instance Config for clusters using Compute Engine Shielded VMs (https://cloud.google.com/security/shielded-cloud/shielded-vm).

type ShieldedInstanceConfigResponseOutput

type ShieldedInstanceConfigResponseOutput struct{ *pulumi.OutputState }

Shielded Instance Config for clusters using Compute Engine Shielded VMs (https://cloud.google.com/security/shielded-cloud/shielded-vm).

func (ShieldedInstanceConfigResponseOutput) ElementType

func (ShieldedInstanceConfigResponseOutput) EnableIntegrityMonitoring

func (o ShieldedInstanceConfigResponseOutput) EnableIntegrityMonitoring() pulumi.BoolOutput

Optional. Defines whether instances have integrity monitoring enabled.

func (ShieldedInstanceConfigResponseOutput) EnableSecureBoot

Optional. Defines whether instances have Secure Boot enabled.

func (ShieldedInstanceConfigResponseOutput) EnableVtpm

Optional. Defines whether instances have the vTPM enabled.

func (ShieldedInstanceConfigResponseOutput) ToShieldedInstanceConfigResponseOutput

func (o ShieldedInstanceConfigResponseOutput) ToShieldedInstanceConfigResponseOutput() ShieldedInstanceConfigResponseOutput

func (ShieldedInstanceConfigResponseOutput) ToShieldedInstanceConfigResponseOutputWithContext

func (o ShieldedInstanceConfigResponseOutput) ToShieldedInstanceConfigResponseOutputWithContext(ctx context.Context) ShieldedInstanceConfigResponseOutput

type SoftwareConfig

type SoftwareConfig struct {
	// Optional. The version of software inside the cluster. It must be one of the supported Dataproc Versions (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions), such as "1.2" (including a subminor version, such as "1.2.29"), or the "preview" version (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). If unspecified, it defaults to the latest Debian version.
	ImageVersion *string `pulumi:"imageVersion"`
	// Optional. The set of components to activate on the cluster.
	OptionalComponents []SoftwareConfigOptionalComponentsItem `pulumi:"optionalComponents"`
	// Optional. The properties to set on daemon config files. Property keys are specified in prefix:property format, for example core:hadoop.tmp.dir. The following are supported prefixes and their mappings: capacity-scheduler: capacity-scheduler.xml, core: core-site.xml, distcp: distcp-default.xml, hdfs: hdfs-site.xml, hive: hive-site.xml, mapred: mapred-site.xml, pig: pig.properties, spark: spark-defaults.conf, yarn: yarn-site.xml. For more information, see Cluster properties (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).
	Properties map[string]string `pulumi:"properties"`
}

Specifies the selection and config of software inside the cluster.

type SoftwareConfigArgs

type SoftwareConfigArgs struct {
	// Optional. The version of software inside the cluster. It must be one of the supported Dataproc Versions (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions), such as "1.2" (including a subminor version, such as "1.2.29"), or the "preview" version (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). If unspecified, it defaults to the latest Debian version.
	ImageVersion pulumi.StringPtrInput `pulumi:"imageVersion"`
	// Optional. The set of components to activate on the cluster.
	OptionalComponents SoftwareConfigOptionalComponentsItemArrayInput `pulumi:"optionalComponents"`
	// Optional. The properties to set on daemon config files. Property keys are specified in prefix:property format, for example core:hadoop.tmp.dir. The following are supported prefixes and their mappings: capacity-scheduler: capacity-scheduler.xml, core: core-site.xml, distcp: distcp-default.xml, hdfs: hdfs-site.xml, hive: hive-site.xml, mapred: mapred-site.xml, pig: pig.properties, spark: spark-defaults.conf, yarn: yarn-site.xml. For more information, see Cluster properties (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).
	Properties pulumi.StringMapInput `pulumi:"properties"`
}

Specifies the selection and config of software inside the cluster.
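
A hedged fragment for SoftwareConfigArgs, again reusing the assumed imports; the image version, component name, and property are placeholders (the component string is passed through the SoftwareConfigOptionalComponentsItem string type documented below).

software := dataproc.SoftwareConfigArgs{
	ImageVersion: pulumi.String("2.1"), // placeholder image version
	OptionalComponents: dataproc.SoftwareConfigOptionalComponentsItemArray{
		dataproc.SoftwareConfigOptionalComponentsItem("JUPYTER"), // illustrative optional component
	},
	Properties: pulumi.StringMap{
		"spark:spark.executor.memory": pulumi.String("4g"), // prefix:property format
	},
}
_ = software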

func (SoftwareConfigArgs) ElementType

func (SoftwareConfigArgs) ElementType() reflect.Type

func (SoftwareConfigArgs) ToSoftwareConfigOutput

func (i SoftwareConfigArgs) ToSoftwareConfigOutput() SoftwareConfigOutput

func (SoftwareConfigArgs) ToSoftwareConfigOutputWithContext

func (i SoftwareConfigArgs) ToSoftwareConfigOutputWithContext(ctx context.Context) SoftwareConfigOutput

func (SoftwareConfigArgs) ToSoftwareConfigPtrOutput

func (i SoftwareConfigArgs) ToSoftwareConfigPtrOutput() SoftwareConfigPtrOutput

func (SoftwareConfigArgs) ToSoftwareConfigPtrOutputWithContext

func (i SoftwareConfigArgs) ToSoftwareConfigPtrOutputWithContext(ctx context.Context) SoftwareConfigPtrOutput

type SoftwareConfigInput

type SoftwareConfigInput interface {
	pulumi.Input

	ToSoftwareConfigOutput() SoftwareConfigOutput
	ToSoftwareConfigOutputWithContext(context.Context) SoftwareConfigOutput
}

SoftwareConfigInput is an input type that accepts SoftwareConfigArgs and SoftwareConfigOutput values. You can construct a concrete instance of `SoftwareConfigInput` via:

SoftwareConfigArgs{...}

type SoftwareConfigOptionalComponentsItem added in v0.4.0

type SoftwareConfigOptionalComponentsItem string

func (SoftwareConfigOptionalComponentsItem) ElementType added in v0.4.0

func (SoftwareConfigOptionalComponentsItem) ToSoftwareConfigOptionalComponentsItemOutput added in v0.6.0

func (e SoftwareConfigOptionalComponentsItem) ToSoftwareConfigOptionalComponentsItemOutput() SoftwareConfigOptionalComponentsItemOutput

func (SoftwareConfigOptionalComponentsItem) ToSoftwareConfigOptionalComponentsItemOutputWithContext added in v0.6.0

func (e SoftwareConfigOptionalComponentsItem) ToSoftwareConfigOptionalComponentsItemOutputWithContext(ctx context.Context) SoftwareConfigOptionalComponentsItemOutput

func (SoftwareConfigOptionalComponentsItem) ToSoftwareConfigOptionalComponentsItemPtrOutput added in v0.6.0

func (e SoftwareConfigOptionalComponentsItem) ToSoftwareConfigOptionalComponentsItemPtrOutput() SoftwareConfigOptionalComponentsItemPtrOutput

func (SoftwareConfigOptionalComponentsItem) ToSoftwareConfigOptionalComponentsItemPtrOutputWithContext added in v0.6.0

func (e SoftwareConfigOptionalComponentsItem) ToSoftwareConfigOptionalComponentsItemPtrOutputWithContext(ctx context.Context) SoftwareConfigOptionalComponentsItemPtrOutput

func (SoftwareConfigOptionalComponentsItem) ToStringOutput added in v0.4.0

func (SoftwareConfigOptionalComponentsItem) ToStringOutputWithContext added in v0.4.0

func (e SoftwareConfigOptionalComponentsItem) ToStringOutputWithContext(ctx context.Context) pulumi.StringOutput

func (SoftwareConfigOptionalComponentsItem) ToStringPtrOutput added in v0.4.0

func (SoftwareConfigOptionalComponentsItem) ToStringPtrOutputWithContext added in v0.4.0

func (e SoftwareConfigOptionalComponentsItem) ToStringPtrOutputWithContext(ctx context.Context) pulumi.StringPtrOutput

type SoftwareConfigOptionalComponentsItemArray added in v0.4.0

type SoftwareConfigOptionalComponentsItemArray []SoftwareConfigOptionalComponentsItem

func (SoftwareConfigOptionalComponentsItemArray) ElementType added in v0.4.0

func (SoftwareConfigOptionalComponentsItemArray) ToSoftwareConfigOptionalComponentsItemArrayOutput added in v0.4.0

func (i SoftwareConfigOptionalComponentsItemArray) ToSoftwareConfigOptionalComponentsItemArrayOutput() SoftwareConfigOptionalComponentsItemArrayOutput

func (SoftwareConfigOptionalComponentsItemArray) ToSoftwareConfigOptionalComponentsItemArrayOutputWithContext added in v0.4.0

func (i SoftwareConfigOptionalComponentsItemArray) ToSoftwareConfigOptionalComponentsItemArrayOutputWithContext(ctx context.Context) SoftwareConfigOptionalComponentsItemArrayOutput

type SoftwareConfigOptionalComponentsItemArrayInput added in v0.4.0

type SoftwareConfigOptionalComponentsItemArrayInput interface {
	pulumi.Input

	ToSoftwareConfigOptionalComponentsItemArrayOutput() SoftwareConfigOptionalComponentsItemArrayOutput
	ToSoftwareConfigOptionalComponentsItemArrayOutputWithContext(context.Context) SoftwareConfigOptionalComponentsItemArrayOutput
}

SoftwareConfigOptionalComponentsItemArrayInput is an input type that accepts SoftwareConfigOptionalComponentsItemArray and SoftwareConfigOptionalComponentsItemArrayOutput values. You can construct a concrete instance of `SoftwareConfigOptionalComponentsItemArrayInput` via:

SoftwareConfigOptionalComponentsItemArray{ SoftwareConfigOptionalComponentsItemArgs{...} }

type SoftwareConfigOptionalComponentsItemArrayOutput added in v0.4.0

type SoftwareConfigOptionalComponentsItemArrayOutput struct{ *pulumi.OutputState }

func (SoftwareConfigOptionalComponentsItemArrayOutput) ElementType added in v0.4.0

func (SoftwareConfigOptionalComponentsItemArrayOutput) Index added in v0.4.0

func (SoftwareConfigOptionalComponentsItemArrayOutput) ToSoftwareConfigOptionalComponentsItemArrayOutput added in v0.4.0

func (o SoftwareConfigOptionalComponentsItemArrayOutput) ToSoftwareConfigOptionalComponentsItemArrayOutput() SoftwareConfigOptionalComponentsItemArrayOutput

func (SoftwareConfigOptionalComponentsItemArrayOutput) ToSoftwareConfigOptionalComponentsItemArrayOutputWithContext added in v0.4.0

func (o SoftwareConfigOptionalComponentsItemArrayOutput) ToSoftwareConfigOptionalComponentsItemArrayOutputWithContext(ctx context.Context) SoftwareConfigOptionalComponentsItemArrayOutput

type SoftwareConfigOptionalComponentsItemInput added in v0.6.0

type SoftwareConfigOptionalComponentsItemInput interface {
	pulumi.Input

	ToSoftwareConfigOptionalComponentsItemOutput() SoftwareConfigOptionalComponentsItemOutput
	ToSoftwareConfigOptionalComponentsItemOutputWithContext(context.Context) SoftwareConfigOptionalComponentsItemOutput
}

SoftwareConfigOptionalComponentsItemInput is an input type that accepts SoftwareConfigOptionalComponentsItemArgs and SoftwareConfigOptionalComponentsItemOutput values. You can construct a concrete instance of `SoftwareConfigOptionalComponentsItemInput` via:

SoftwareConfigOptionalComponentsItemArgs{...}

type SoftwareConfigOptionalComponentsItemOutput added in v0.6.0

type SoftwareConfigOptionalComponentsItemOutput struct{ *pulumi.OutputState }

func (SoftwareConfigOptionalComponentsItemOutput) ElementType added in v0.6.0

func (SoftwareConfigOptionalComponentsItemOutput) ToSoftwareConfigOptionalComponentsItemOutput added in v0.6.0

func (o SoftwareConfigOptionalComponentsItemOutput) ToSoftwareConfigOptionalComponentsItemOutput() SoftwareConfigOptionalComponentsItemOutput

func (SoftwareConfigOptionalComponentsItemOutput) ToSoftwareConfigOptionalComponentsItemOutputWithContext added in v0.6.0

func (o SoftwareConfigOptionalComponentsItemOutput) ToSoftwareConfigOptionalComponentsItemOutputWithContext(ctx context.Context) SoftwareConfigOptionalComponentsItemOutput

func (SoftwareConfigOptionalComponentsItemOutput) ToSoftwareConfigOptionalComponentsItemPtrOutput added in v0.6.0

func (o SoftwareConfigOptionalComponentsItemOutput) ToSoftwareConfigOptionalComponentsItemPtrOutput() SoftwareConfigOptionalComponentsItemPtrOutput

func (SoftwareConfigOptionalComponentsItemOutput) ToSoftwareConfigOptionalComponentsItemPtrOutputWithContext added in v0.6.0

func (o SoftwareConfigOptionalComponentsItemOutput) ToSoftwareConfigOptionalComponentsItemPtrOutputWithContext(ctx context.Context) SoftwareConfigOptionalComponentsItemPtrOutput

func (SoftwareConfigOptionalComponentsItemOutput) ToStringOutput added in v0.6.0

func (SoftwareConfigOptionalComponentsItemOutput) ToStringOutputWithContext added in v0.6.0

func (SoftwareConfigOptionalComponentsItemOutput) ToStringPtrOutput added in v0.6.0

func (SoftwareConfigOptionalComponentsItemOutput) ToStringPtrOutputWithContext added in v0.6.0

type SoftwareConfigOptionalComponentsItemPtrInput added in v0.6.0

type SoftwareConfigOptionalComponentsItemPtrInput interface {
	pulumi.Input

	ToSoftwareConfigOptionalComponentsItemPtrOutput() SoftwareConfigOptionalComponentsItemPtrOutput
	ToSoftwareConfigOptionalComponentsItemPtrOutputWithContext(context.Context) SoftwareConfigOptionalComponentsItemPtrOutput
}

func SoftwareConfigOptionalComponentsItemPtr added in v0.6.0

func SoftwareConfigOptionalComponentsItemPtr(v string) SoftwareConfigOptionalComponentsItemPtrInput

type SoftwareConfigOptionalComponentsItemPtrOutput added in v0.6.0

type SoftwareConfigOptionalComponentsItemPtrOutput struct{ *pulumi.OutputState }

func (SoftwareConfigOptionalComponentsItemPtrOutput) Elem added in v0.6.0

func (SoftwareConfigOptionalComponentsItemPtrOutput) ElementType added in v0.6.0

func (SoftwareConfigOptionalComponentsItemPtrOutput) ToSoftwareConfigOptionalComponentsItemPtrOutput added in v0.6.0

func (o SoftwareConfigOptionalComponentsItemPtrOutput) ToSoftwareConfigOptionalComponentsItemPtrOutput() SoftwareConfigOptionalComponentsItemPtrOutput

func (SoftwareConfigOptionalComponentsItemPtrOutput) ToSoftwareConfigOptionalComponentsItemPtrOutputWithContext added in v0.6.0

func (o SoftwareConfigOptionalComponentsItemPtrOutput) ToSoftwareConfigOptionalComponentsItemPtrOutputWithContext(ctx context.Context) SoftwareConfigOptionalComponentsItemPtrOutput

func (SoftwareConfigOptionalComponentsItemPtrOutput) ToStringPtrOutput added in v0.6.0

func (SoftwareConfigOptionalComponentsItemPtrOutput) ToStringPtrOutputWithContext added in v0.6.0

type SoftwareConfigOutput

type SoftwareConfigOutput struct{ *pulumi.OutputState }

Specifies the selection and config of software inside the cluster.

func (SoftwareConfigOutput) ElementType

func (SoftwareConfigOutput) ElementType() reflect.Type

func (SoftwareConfigOutput) ImageVersion

func (o SoftwareConfigOutput) ImageVersion() pulumi.StringPtrOutput

Optional. The version of software inside the cluster. It must be one of the supported Dataproc Versions (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions), such as "1.2" (including a subminor version, such as "1.2.29"), or the "preview" version (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). If unspecified, it defaults to the latest Debian version.

func (SoftwareConfigOutput) OptionalComponents

Optional. The set of components to activate on the cluster.

func (SoftwareConfigOutput) Properties

Optional. The properties to set on daemon config files. Property keys are specified in prefix:property format, for example core:hadoop.tmp.dir. The following are supported prefixes and their mappings: capacity-scheduler: capacity-scheduler.xml, core: core-site.xml, distcp: distcp-default.xml, hdfs: hdfs-site.xml, hive: hive-site.xml, mapred: mapred-site.xml, pig: pig.properties, spark: spark-defaults.conf, yarn: yarn-site.xml. For more information, see Cluster properties (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).
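Put together, a software config input is typically built from SoftwareConfigArgs. The sketch below is illustrative only: the Args field names are assumed to mirror the getters above, the image version, component name, and property values are placeholders, and pulumi refers to the standard github.com/pulumi/pulumi/sdk/v3/go/pulumi package.

softwareConfig := SoftwareConfigArgs{
	ImageVersion: pulumi.StringPtr("2.1-debian11"), // placeholder image version
	OptionalComponents: SoftwareConfigOptionalComponentsItemArray{
		SoftwareConfigOptionalComponentsItem("JUPYTER"),
	},
	Properties: pulumi.StringMap{
		// prefix:property keys, as described above
		"core:hadoop.tmp.dir":         pulumi.String("/tmp/hadoop"),
		"spark:spark.executor.memory": pulumi.String("4g"),
	},
}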

func (SoftwareConfigOutput) ToSoftwareConfigOutput

func (o SoftwareConfigOutput) ToSoftwareConfigOutput() SoftwareConfigOutput

func (SoftwareConfigOutput) ToSoftwareConfigOutputWithContext

func (o SoftwareConfigOutput) ToSoftwareConfigOutputWithContext(ctx context.Context) SoftwareConfigOutput

func (SoftwareConfigOutput) ToSoftwareConfigPtrOutput

func (o SoftwareConfigOutput) ToSoftwareConfigPtrOutput() SoftwareConfigPtrOutput

func (SoftwareConfigOutput) ToSoftwareConfigPtrOutputWithContext

func (o SoftwareConfigOutput) ToSoftwareConfigPtrOutputWithContext(ctx context.Context) SoftwareConfigPtrOutput

type SoftwareConfigPtrInput

type SoftwareConfigPtrInput interface {
	pulumi.Input

	ToSoftwareConfigPtrOutput() SoftwareConfigPtrOutput
	ToSoftwareConfigPtrOutputWithContext(context.Context) SoftwareConfigPtrOutput
}

SoftwareConfigPtrInput is an input type that accepts SoftwareConfigArgs, SoftwareConfigPtr and SoftwareConfigPtrOutput values. You can construct a concrete instance of `SoftwareConfigPtrInput` via:

        SoftwareConfigArgs{...}

or:

        nil

type SoftwareConfigPtrOutput

type SoftwareConfigPtrOutput struct{ *pulumi.OutputState }

func (SoftwareConfigPtrOutput) Elem

func (SoftwareConfigPtrOutput) ElementType

func (SoftwareConfigPtrOutput) ElementType() reflect.Type

func (SoftwareConfigPtrOutput) ImageVersion

Optional. The version of software inside the cluster. It must be one of the supported Dataproc Versions (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions), such as "1.2" (including a subminor version, such as "1.2.29"), or the "preview" version (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). If unspecified, it defaults to the latest Debian version.

func (SoftwareConfigPtrOutput) OptionalComponents

Optional. The set of components to activate on the cluster.

func (SoftwareConfigPtrOutput) Properties

Optional. The properties to set on daemon config files. Property keys are specified in prefix:property format, for example core:hadoop.tmp.dir. The following are supported prefixes and their mappings: capacity-scheduler: capacity-scheduler.xml, core: core-site.xml, distcp: distcp-default.xml, hdfs: hdfs-site.xml, hive: hive-site.xml, mapred: mapred-site.xml, pig: pig.properties, spark: spark-defaults.conf, yarn: yarn-site.xml. For more information, see Cluster properties (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).

func (SoftwareConfigPtrOutput) ToSoftwareConfigPtrOutput

func (o SoftwareConfigPtrOutput) ToSoftwareConfigPtrOutput() SoftwareConfigPtrOutput

func (SoftwareConfigPtrOutput) ToSoftwareConfigPtrOutputWithContext

func (o SoftwareConfigPtrOutput) ToSoftwareConfigPtrOutputWithContext(ctx context.Context) SoftwareConfigPtrOutput

type SoftwareConfigResponse

type SoftwareConfigResponse struct {
	// Optional. The version of software inside the cluster. It must be one of the supported Dataproc Versions (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions), such as "1.2" (including a subminor version, such as "1.2.29"), or the "preview" version (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). If unspecified, it defaults to the latest Debian version.
	ImageVersion string `pulumi:"imageVersion"`
	// Optional. The set of components to activate on the cluster.
	OptionalComponents []string `pulumi:"optionalComponents"`
	// Optional. The properties to set on daemon config files. Property keys are specified in prefix:property format, for example core:hadoop.tmp.dir. The following are supported prefixes and their mappings: capacity-scheduler: capacity-scheduler.xml, core: core-site.xml, distcp: distcp-default.xml, hdfs: hdfs-site.xml, hive: hive-site.xml, mapred: mapred-site.xml, pig: pig.properties, spark: spark-defaults.conf, yarn: yarn-site.xml. For more information, see Cluster properties (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).
	Properties map[string]string `pulumi:"properties"`
}

Specifies the selection and config of software inside the cluster.

type SoftwareConfigResponseOutput

type SoftwareConfigResponseOutput struct{ *pulumi.OutputState }

Specifies the selection and config of software inside the cluster.

func (SoftwareConfigResponseOutput) ElementType

func (SoftwareConfigResponseOutput) ImageVersion

Optional. The version of software inside the cluster. It must be one of the supported Dataproc Versions (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions), such as "1.2" (including a subminor version, such as "1.2.29"), or the "preview" version (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). If unspecified, it defaults to the latest Debian version.

func (SoftwareConfigResponseOutput) OptionalComponents

func (o SoftwareConfigResponseOutput) OptionalComponents() pulumi.StringArrayOutput

Optional. The set of components to activate on the cluster.

func (SoftwareConfigResponseOutput) Properties

Optional. The properties to set on daemon config files. Property keys are specified in prefix:property format, for example core:hadoop.tmp.dir. The following are supported prefixes and their mappings: capacity-scheduler: capacity-scheduler.xml, core: core-site.xml, distcp: distcp-default.xml, hdfs: hdfs-site.xml, hive: hive-site.xml, mapred: mapred-site.xml, pig: pig.properties, spark: spark-defaults.conf, yarn: yarn-site.xml. For more information, see Cluster properties (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).

func (SoftwareConfigResponseOutput) ToSoftwareConfigResponseOutput

func (o SoftwareConfigResponseOutput) ToSoftwareConfigResponseOutput() SoftwareConfigResponseOutput

func (SoftwareConfigResponseOutput) ToSoftwareConfigResponseOutputWithContext

func (o SoftwareConfigResponseOutput) ToSoftwareConfigResponseOutputWithContext(ctx context.Context) SoftwareConfigResponseOutput

type SparkBatch added in v0.12.0

type SparkBatch struct {
	// Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris []string `pulumi:"archiveUris"`
	// Optional. The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.
	Args []string `pulumi:"args"`
	// Optional. HCFS URIs of files to be placed in the working directory of each executor.
	FileUris []string `pulumi:"fileUris"`
	// Optional. HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.
	JarFileUris []string `pulumi:"jarFileUris"`
	// Optional. The name of the driver main class. The jar file that contains the class must be in the classpath or specified in jar_file_uris.
	MainClass *string `pulumi:"mainClass"`
	// Optional. The HCFS URI of the jar file that contains the main class.
	MainJarFileUri *string `pulumi:"mainJarFileUri"`
}

A configuration for running an Apache Spark (https://spark.apache.org/) batch workload.
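A hedged sketch of a SparkBatchArgs value, using placeholder Cloud Storage URIs; typically only one of MainClass or MainJarFileUri is supplied, and pulumi refers to the standard pulumi SDK package:

sparkBatch := SparkBatchArgs{
	MainJarFileUri: pulumi.StringPtr("gs://my-bucket/jars/wordcount.jar"),
	Args:           pulumi.StringArray{pulumi.String("gs://my-bucket/input/")},
	JarFileUris:    pulumi.StringArray{pulumi.String("gs://my-bucket/jars/helpers.jar")},
}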

type SparkBatchArgs added in v0.12.0

type SparkBatchArgs struct {
	// Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris pulumi.StringArrayInput `pulumi:"archiveUris"`
	// Optional. The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.
	Args pulumi.StringArrayInput `pulumi:"args"`
	// Optional. HCFS URIs of files to be placed in the working directory of each executor.
	FileUris pulumi.StringArrayInput `pulumi:"fileUris"`
	// Optional. HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.
	JarFileUris pulumi.StringArrayInput `pulumi:"jarFileUris"`
	// Optional. The name of the driver main class. The jar file that contains the class must be in the classpath or specified in jar_file_uris.
	MainClass pulumi.StringPtrInput `pulumi:"mainClass"`
	// Optional. The HCFS URI of the jar file that contains the main class.
	MainJarFileUri pulumi.StringPtrInput `pulumi:"mainJarFileUri"`
}

A configuration for running an Apache Spark (https://spark.apache.org/) batch workload.

func (SparkBatchArgs) ElementType added in v0.12.0

func (SparkBatchArgs) ElementType() reflect.Type

func (SparkBatchArgs) ToSparkBatchOutput added in v0.12.0

func (i SparkBatchArgs) ToSparkBatchOutput() SparkBatchOutput

func (SparkBatchArgs) ToSparkBatchOutputWithContext added in v0.12.0

func (i SparkBatchArgs) ToSparkBatchOutputWithContext(ctx context.Context) SparkBatchOutput

func (SparkBatchArgs) ToSparkBatchPtrOutput added in v0.12.0

func (i SparkBatchArgs) ToSparkBatchPtrOutput() SparkBatchPtrOutput

func (SparkBatchArgs) ToSparkBatchPtrOutputWithContext added in v0.12.0

func (i SparkBatchArgs) ToSparkBatchPtrOutputWithContext(ctx context.Context) SparkBatchPtrOutput

type SparkBatchInput added in v0.12.0

type SparkBatchInput interface {
	pulumi.Input

	ToSparkBatchOutput() SparkBatchOutput
	ToSparkBatchOutputWithContext(context.Context) SparkBatchOutput
}

SparkBatchInput is an input type that accepts SparkBatchArgs and SparkBatchOutput values. You can construct a concrete instance of `SparkBatchInput` via:

SparkBatchArgs{...}

type SparkBatchOutput added in v0.12.0

type SparkBatchOutput struct{ *pulumi.OutputState }

A configuration for running an Apache Spark (https://spark.apache.org/) batch workload.

func (SparkBatchOutput) ArchiveUris added in v0.12.0

func (o SparkBatchOutput) ArchiveUris() pulumi.StringArrayOutput

Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.

func (SparkBatchOutput) Args added in v0.12.0

Optional. The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.

func (SparkBatchOutput) ElementType added in v0.12.0

func (SparkBatchOutput) ElementType() reflect.Type

func (SparkBatchOutput) FileUris added in v0.12.0

Optional. HCFS URIs of files to be placed in the working directory of each executor.

func (SparkBatchOutput) JarFileUris added in v0.12.0

func (o SparkBatchOutput) JarFileUris() pulumi.StringArrayOutput

Optional. HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.

func (SparkBatchOutput) MainClass added in v0.12.0

func (o SparkBatchOutput) MainClass() pulumi.StringPtrOutput

Optional. The name of the driver main class. The jar file that contains the class must be in the classpath or specified in jar_file_uris.

func (SparkBatchOutput) MainJarFileUri added in v0.12.0

func (o SparkBatchOutput) MainJarFileUri() pulumi.StringPtrOutput

Optional. The HCFS URI of the jar file that contains the main class.

func (SparkBatchOutput) ToSparkBatchOutput added in v0.12.0

func (o SparkBatchOutput) ToSparkBatchOutput() SparkBatchOutput

func (SparkBatchOutput) ToSparkBatchOutputWithContext added in v0.12.0

func (o SparkBatchOutput) ToSparkBatchOutputWithContext(ctx context.Context) SparkBatchOutput

func (SparkBatchOutput) ToSparkBatchPtrOutput added in v0.12.0

func (o SparkBatchOutput) ToSparkBatchPtrOutput() SparkBatchPtrOutput

func (SparkBatchOutput) ToSparkBatchPtrOutputWithContext added in v0.12.0

func (o SparkBatchOutput) ToSparkBatchPtrOutputWithContext(ctx context.Context) SparkBatchPtrOutput

type SparkBatchPtrInput added in v0.12.0

type SparkBatchPtrInput interface {
	pulumi.Input

	ToSparkBatchPtrOutput() SparkBatchPtrOutput
	ToSparkBatchPtrOutputWithContext(context.Context) SparkBatchPtrOutput
}

SparkBatchPtrInput is an input type that accepts SparkBatchArgs, SparkBatchPtr and SparkBatchPtrOutput values. You can construct a concrete instance of `SparkBatchPtrInput` via:

        SparkBatchArgs{...}

or:

        nil

func SparkBatchPtr added in v0.12.0

func SparkBatchPtr(v *SparkBatchArgs) SparkBatchPtrInput

type SparkBatchPtrOutput added in v0.12.0

type SparkBatchPtrOutput struct{ *pulumi.OutputState }

func (SparkBatchPtrOutput) ArchiveUris added in v0.12.0

Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.

func (SparkBatchPtrOutput) Args added in v0.12.0

Optional. The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.

func (SparkBatchPtrOutput) Elem added in v0.12.0

func (SparkBatchPtrOutput) ElementType added in v0.12.0

func (SparkBatchPtrOutput) ElementType() reflect.Type

func (SparkBatchPtrOutput) FileUris added in v0.12.0

Optional. HCFS URIs of files to be placed in the working directory of each executor.

func (SparkBatchPtrOutput) JarFileUris added in v0.12.0

Optional. HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.

func (SparkBatchPtrOutput) MainClass added in v0.12.0

Optional. The name of the driver main class. The jar file that contains the class must be in the classpath or specified in jar_file_uris.

func (SparkBatchPtrOutput) MainJarFileUri added in v0.12.0

func (o SparkBatchPtrOutput) MainJarFileUri() pulumi.StringPtrOutput

Optional. The HCFS URI of the jar file that contains the main class.

func (SparkBatchPtrOutput) ToSparkBatchPtrOutput added in v0.12.0

func (o SparkBatchPtrOutput) ToSparkBatchPtrOutput() SparkBatchPtrOutput

func (SparkBatchPtrOutput) ToSparkBatchPtrOutputWithContext added in v0.12.0

func (o SparkBatchPtrOutput) ToSparkBatchPtrOutputWithContext(ctx context.Context) SparkBatchPtrOutput

type SparkBatchResponse added in v0.12.0

type SparkBatchResponse struct {
	// Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris []string `pulumi:"archiveUris"`
	// Optional. The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.
	Args []string `pulumi:"args"`
	// Optional. HCFS URIs of files to be placed in the working directory of each executor.
	FileUris []string `pulumi:"fileUris"`
	// Optional. HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.
	JarFileUris []string `pulumi:"jarFileUris"`
	// Optional. The name of the driver main class. The jar file that contains the class must be in the classpath or specified in jar_file_uris.
	MainClass string `pulumi:"mainClass"`
	// Optional. The HCFS URI of the jar file that contains the main class.
	MainJarFileUri string `pulumi:"mainJarFileUri"`
}

A configuration for running an Apache Spark (https://spark.apache.org/) batch workload.

type SparkBatchResponseOutput added in v0.12.0

type SparkBatchResponseOutput struct{ *pulumi.OutputState }

A configuration for running an Apache Spark (https://spark.apache.org/) batch workload.

func (SparkBatchResponseOutput) ArchiveUris added in v0.12.0

Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.

func (SparkBatchResponseOutput) Args added in v0.12.0

Optional. The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.

func (SparkBatchResponseOutput) ElementType added in v0.12.0

func (SparkBatchResponseOutput) ElementType() reflect.Type

func (SparkBatchResponseOutput) FileUris added in v0.12.0

Optional. HCFS URIs of files to be placed in the working directory of each executor.

func (SparkBatchResponseOutput) JarFileUris added in v0.12.0

Optional. HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.

func (SparkBatchResponseOutput) MainClass added in v0.12.0

Optional. The name of the driver main class. The jar file that contains the class must be in the classpath or specified in jar_file_uris.

func (SparkBatchResponseOutput) MainJarFileUri added in v0.12.0

func (o SparkBatchResponseOutput) MainJarFileUri() pulumi.StringOutput

Optional. The HCFS URI of the jar file that contains the main class.

func (SparkBatchResponseOutput) ToSparkBatchResponseOutput added in v0.12.0

func (o SparkBatchResponseOutput) ToSparkBatchResponseOutput() SparkBatchResponseOutput

func (SparkBatchResponseOutput) ToSparkBatchResponseOutputWithContext added in v0.12.0

func (o SparkBatchResponseOutput) ToSparkBatchResponseOutputWithContext(ctx context.Context) SparkBatchResponseOutput

type SparkHistoryServerConfig added in v0.12.0

type SparkHistoryServerConfig struct {
	// Optional. Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload. Example: projects/[project_id]/regions/[region]/clusters/[cluster_name]
	DataprocCluster *string `pulumi:"dataprocCluster"`
}

Spark History Server configuration for the workload.
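A minimal sketch; the project, region, and cluster name below are placeholders in the resource-name format shown above, and pulumi is the standard pulumi SDK package:

historyServer := SparkHistoryServerConfigArgs{
	DataprocCluster: pulumi.StringPtr("projects/my-project/regions/us-central1/clusters/my-phs-cluster"),
}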

type SparkHistoryServerConfigArgs added in v0.12.0

type SparkHistoryServerConfigArgs struct {
	// Optional. Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload. Example: projects/[project_id]/regions/[region]/clusters/[cluster_name]
	DataprocCluster pulumi.StringPtrInput `pulumi:"dataprocCluster"`
}

Spark History Server configuration for the workload.

func (SparkHistoryServerConfigArgs) ElementType added in v0.12.0

func (SparkHistoryServerConfigArgs) ToSparkHistoryServerConfigOutput added in v0.12.0

func (i SparkHistoryServerConfigArgs) ToSparkHistoryServerConfigOutput() SparkHistoryServerConfigOutput

func (SparkHistoryServerConfigArgs) ToSparkHistoryServerConfigOutputWithContext added in v0.12.0

func (i SparkHistoryServerConfigArgs) ToSparkHistoryServerConfigOutputWithContext(ctx context.Context) SparkHistoryServerConfigOutput

func (SparkHistoryServerConfigArgs) ToSparkHistoryServerConfigPtrOutput added in v0.12.0

func (i SparkHistoryServerConfigArgs) ToSparkHistoryServerConfigPtrOutput() SparkHistoryServerConfigPtrOutput

func (SparkHistoryServerConfigArgs) ToSparkHistoryServerConfigPtrOutputWithContext added in v0.12.0

func (i SparkHistoryServerConfigArgs) ToSparkHistoryServerConfigPtrOutputWithContext(ctx context.Context) SparkHistoryServerConfigPtrOutput

type SparkHistoryServerConfigInput added in v0.12.0

type SparkHistoryServerConfigInput interface {
	pulumi.Input

	ToSparkHistoryServerConfigOutput() SparkHistoryServerConfigOutput
	ToSparkHistoryServerConfigOutputWithContext(context.Context) SparkHistoryServerConfigOutput
}

SparkHistoryServerConfigInput is an input type that accepts SparkHistoryServerConfigArgs and SparkHistoryServerConfigOutput values. You can construct a concrete instance of `SparkHistoryServerConfigInput` via:

SparkHistoryServerConfigArgs{...}

type SparkHistoryServerConfigOutput added in v0.12.0

type SparkHistoryServerConfigOutput struct{ *pulumi.OutputState }

Spark History Server configuration for the workload.

func (SparkHistoryServerConfigOutput) DataprocCluster added in v0.12.0

Optional. Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload. Example: projects/[project_id]/regions/[region]/clusters/[cluster_name]

func (SparkHistoryServerConfigOutput) ElementType added in v0.12.0

func (SparkHistoryServerConfigOutput) ToSparkHistoryServerConfigOutput added in v0.12.0

func (o SparkHistoryServerConfigOutput) ToSparkHistoryServerConfigOutput() SparkHistoryServerConfigOutput

func (SparkHistoryServerConfigOutput) ToSparkHistoryServerConfigOutputWithContext added in v0.12.0

func (o SparkHistoryServerConfigOutput) ToSparkHistoryServerConfigOutputWithContext(ctx context.Context) SparkHistoryServerConfigOutput

func (SparkHistoryServerConfigOutput) ToSparkHistoryServerConfigPtrOutput added in v0.12.0

func (o SparkHistoryServerConfigOutput) ToSparkHistoryServerConfigPtrOutput() SparkHistoryServerConfigPtrOutput

func (SparkHistoryServerConfigOutput) ToSparkHistoryServerConfigPtrOutputWithContext added in v0.12.0

func (o SparkHistoryServerConfigOutput) ToSparkHistoryServerConfigPtrOutputWithContext(ctx context.Context) SparkHistoryServerConfigPtrOutput

type SparkHistoryServerConfigPtrInput added in v0.12.0

type SparkHistoryServerConfigPtrInput interface {
	pulumi.Input

	ToSparkHistoryServerConfigPtrOutput() SparkHistoryServerConfigPtrOutput
	ToSparkHistoryServerConfigPtrOutputWithContext(context.Context) SparkHistoryServerConfigPtrOutput
}

SparkHistoryServerConfigPtrInput is an input type that accepts SparkHistoryServerConfigArgs, SparkHistoryServerConfigPtr and SparkHistoryServerConfigPtrOutput values. You can construct a concrete instance of `SparkHistoryServerConfigPtrInput` via:

        SparkHistoryServerConfigArgs{...}

or:

        nil

func SparkHistoryServerConfigPtr added in v0.12.0

func SparkHistoryServerConfigPtr(v *SparkHistoryServerConfigArgs) SparkHistoryServerConfigPtrInput

type SparkHistoryServerConfigPtrOutput added in v0.12.0

type SparkHistoryServerConfigPtrOutput struct{ *pulumi.OutputState }

func (SparkHistoryServerConfigPtrOutput) DataprocCluster added in v0.12.0

Optional. Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload. Example: projects/[project_id]/regions/[region]/clusters/[cluster_name]

func (SparkHistoryServerConfigPtrOutput) Elem added in v0.12.0

func (SparkHistoryServerConfigPtrOutput) ElementType added in v0.12.0

func (SparkHistoryServerConfigPtrOutput) ToSparkHistoryServerConfigPtrOutput added in v0.12.0

func (o SparkHistoryServerConfigPtrOutput) ToSparkHistoryServerConfigPtrOutput() SparkHistoryServerConfigPtrOutput

func (SparkHistoryServerConfigPtrOutput) ToSparkHistoryServerConfigPtrOutputWithContext added in v0.12.0

func (o SparkHistoryServerConfigPtrOutput) ToSparkHistoryServerConfigPtrOutputWithContext(ctx context.Context) SparkHistoryServerConfigPtrOutput

type SparkHistoryServerConfigResponse added in v0.12.0

type SparkHistoryServerConfigResponse struct {
	// Optional. Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload. Example: projects/[project_id]/regions/[region]/clusters/[cluster_name]
	DataprocCluster string `pulumi:"dataprocCluster"`
}

Spark History Server configuration for the workload.

type SparkHistoryServerConfigResponseOutput added in v0.12.0

type SparkHistoryServerConfigResponseOutput struct{ *pulumi.OutputState }

Spark History Server configuration for the workload.

func (SparkHistoryServerConfigResponseOutput) DataprocCluster added in v0.12.0

Optional. Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload. Example: projects/[project_id]/regions/[region]/clusters/[cluster_name]

func (SparkHistoryServerConfigResponseOutput) ElementType added in v0.12.0

func (SparkHistoryServerConfigResponseOutput) ToSparkHistoryServerConfigResponseOutput added in v0.12.0

func (o SparkHistoryServerConfigResponseOutput) ToSparkHistoryServerConfigResponseOutput() SparkHistoryServerConfigResponseOutput

func (SparkHistoryServerConfigResponseOutput) ToSparkHistoryServerConfigResponseOutputWithContext added in v0.12.0

func (o SparkHistoryServerConfigResponseOutput) ToSparkHistoryServerConfigResponseOutputWithContext(ctx context.Context) SparkHistoryServerConfigResponseOutput

type SparkJob

type SparkJob struct {
	// Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris []string `pulumi:"archiveUris"`
	// Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
	Args []string `pulumi:"args"`
	// Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
	FileUris []string `pulumi:"fileUris"`
	// Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.
	JarFileUris []string `pulumi:"jarFileUris"`
	// Optional. The runtime log config for job execution.
	LoggingConfig *LoggingConfig `pulumi:"loggingConfig"`
	// The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in SparkJob.jar_file_uris.
	MainClass *string `pulumi:"mainClass"`
	// The HCFS URI of the jar file that contains the main class.
	MainJarFileUri *string `pulumi:"mainJarFileUri"`
	// Optional. A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.
	Properties map[string]string `pulumi:"properties"`
}

A Dataproc job for running Apache Spark (https://spark.apache.org/) applications on YARN.
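A hedged sketch of a SparkJobArgs value; the class name, jar path, arguments, and properties are placeholders (the examples jar path matches what Dataproc images commonly ship, but verify it for your image), and pulumi is the standard pulumi SDK package:

sparkJob := SparkJobArgs{
	MainClass:   pulumi.StringPtr("org.apache.spark.examples.SparkPi"),
	JarFileUris: pulumi.StringArray{pulumi.String("file:///usr/lib/spark/examples/jars/spark-examples.jar")},
	Args:        pulumi.StringArray{pulumi.String("1000")},
	Properties: pulumi.StringMap{
		"spark.executor.cores": pulumi.String("2"),
	},
}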

type SparkJobArgs

type SparkJobArgs struct {
	// Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris pulumi.StringArrayInput `pulumi:"archiveUris"`
	// Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
	Args pulumi.StringArrayInput `pulumi:"args"`
	// Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
	FileUris pulumi.StringArrayInput `pulumi:"fileUris"`
	// Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.
	JarFileUris pulumi.StringArrayInput `pulumi:"jarFileUris"`
	// Optional. The runtime log config for job execution.
	LoggingConfig LoggingConfigPtrInput `pulumi:"loggingConfig"`
	// The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in SparkJob.jar_file_uris.
	MainClass pulumi.StringPtrInput `pulumi:"mainClass"`
	// The HCFS URI of the jar file that contains the main class.
	MainJarFileUri pulumi.StringPtrInput `pulumi:"mainJarFileUri"`
	// Optional. A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.
	Properties pulumi.StringMapInput `pulumi:"properties"`
}

A Dataproc job for running Apache Spark (https://spark.apache.org/) applications on YARN.

func (SparkJobArgs) ElementType

func (SparkJobArgs) ElementType() reflect.Type

func (SparkJobArgs) ToSparkJobOutput

func (i SparkJobArgs) ToSparkJobOutput() SparkJobOutput

func (SparkJobArgs) ToSparkJobOutputWithContext

func (i SparkJobArgs) ToSparkJobOutputWithContext(ctx context.Context) SparkJobOutput

func (SparkJobArgs) ToSparkJobPtrOutput

func (i SparkJobArgs) ToSparkJobPtrOutput() SparkJobPtrOutput

func (SparkJobArgs) ToSparkJobPtrOutputWithContext

func (i SparkJobArgs) ToSparkJobPtrOutputWithContext(ctx context.Context) SparkJobPtrOutput

type SparkJobInput

type SparkJobInput interface {
	pulumi.Input

	ToSparkJobOutput() SparkJobOutput
	ToSparkJobOutputWithContext(context.Context) SparkJobOutput
}

SparkJobInput is an input type that accepts SparkJobArgs and SparkJobOutput values. You can construct a concrete instance of `SparkJobInput` via:

SparkJobArgs{...}

type SparkJobOutput

type SparkJobOutput struct{ *pulumi.OutputState }

A Dataproc job for running Apache Spark (https://spark.apache.org/) applications on YARN.

func (SparkJobOutput) ArchiveUris

func (o SparkJobOutput) ArchiveUris() pulumi.StringArrayOutput

Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.

func (SparkJobOutput) Args

Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.

func (SparkJobOutput) ElementType

func (SparkJobOutput) ElementType() reflect.Type

func (SparkJobOutput) FileUris

Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.

func (SparkJobOutput) JarFileUris

func (o SparkJobOutput) JarFileUris() pulumi.StringArrayOutput

Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.

func (SparkJobOutput) LoggingConfig

func (o SparkJobOutput) LoggingConfig() LoggingConfigPtrOutput

Optional. The runtime log config for job execution.

func (SparkJobOutput) MainClass

func (o SparkJobOutput) MainClass() pulumi.StringPtrOutput

The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in SparkJob.jar_file_uris.

func (SparkJobOutput) MainJarFileUri

func (o SparkJobOutput) MainJarFileUri() pulumi.StringPtrOutput

The HCFS URI of the jar file that contains the main class.

func (SparkJobOutput) Properties

func (o SparkJobOutput) Properties() pulumi.StringMapOutput

Optional. A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.

func (SparkJobOutput) ToSparkJobOutput

func (o SparkJobOutput) ToSparkJobOutput() SparkJobOutput

func (SparkJobOutput) ToSparkJobOutputWithContext

func (o SparkJobOutput) ToSparkJobOutputWithContext(ctx context.Context) SparkJobOutput

func (SparkJobOutput) ToSparkJobPtrOutput

func (o SparkJobOutput) ToSparkJobPtrOutput() SparkJobPtrOutput

func (SparkJobOutput) ToSparkJobPtrOutputWithContext

func (o SparkJobOutput) ToSparkJobPtrOutputWithContext(ctx context.Context) SparkJobPtrOutput

type SparkJobPtrInput

type SparkJobPtrInput interface {
	pulumi.Input

	ToSparkJobPtrOutput() SparkJobPtrOutput
	ToSparkJobPtrOutputWithContext(context.Context) SparkJobPtrOutput
}

SparkJobPtrInput is an input type that accepts SparkJobArgs, SparkJobPtr and SparkJobPtrOutput values. You can construct a concrete instance of `SparkJobPtrInput` via:

        SparkJobArgs{...}

or:

        nil

func SparkJobPtr

func SparkJobPtr(v *SparkJobArgs) SparkJobPtrInput

type SparkJobPtrOutput

type SparkJobPtrOutput struct{ *pulumi.OutputState }

func (SparkJobPtrOutput) ArchiveUris

func (o SparkJobPtrOutput) ArchiveUris() pulumi.StringArrayOutput

Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.

func (SparkJobPtrOutput) Args

Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.

func (SparkJobPtrOutput) Elem

func (SparkJobPtrOutput) ElementType

func (SparkJobPtrOutput) ElementType() reflect.Type

func (SparkJobPtrOutput) FileUris

Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.

func (SparkJobPtrOutput) JarFileUris

func (o SparkJobPtrOutput) JarFileUris() pulumi.StringArrayOutput

Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.

func (SparkJobPtrOutput) LoggingConfig

func (o SparkJobPtrOutput) LoggingConfig() LoggingConfigPtrOutput

Optional. The runtime log config for job execution.

func (SparkJobPtrOutput) MainClass

The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in SparkJob.jar_file_uris.

func (SparkJobPtrOutput) MainJarFileUri

func (o SparkJobPtrOutput) MainJarFileUri() pulumi.StringPtrOutput

The HCFS URI of the jar file that contains the main class.

func (SparkJobPtrOutput) Properties

func (o SparkJobPtrOutput) Properties() pulumi.StringMapOutput

Optional. A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.

func (SparkJobPtrOutput) ToSparkJobPtrOutput

func (o SparkJobPtrOutput) ToSparkJobPtrOutput() SparkJobPtrOutput

func (SparkJobPtrOutput) ToSparkJobPtrOutputWithContext

func (o SparkJobPtrOutput) ToSparkJobPtrOutputWithContext(ctx context.Context) SparkJobPtrOutput

type SparkJobResponse

type SparkJobResponse struct {
	// Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris []string `pulumi:"archiveUris"`
	// Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
	Args []string `pulumi:"args"`
	// Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
	FileUris []string `pulumi:"fileUris"`
	// Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.
	JarFileUris []string `pulumi:"jarFileUris"`
	// Optional. The runtime log config for job execution.
	LoggingConfig LoggingConfigResponse `pulumi:"loggingConfig"`
	// The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in SparkJob.jar_file_uris.
	MainClass string `pulumi:"mainClass"`
	// The HCFS URI of the jar file that contains the main class.
	MainJarFileUri string `pulumi:"mainJarFileUri"`
	// Optional. A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.
	Properties map[string]string `pulumi:"properties"`
}

A Dataproc job for running Apache Spark (https://spark.apache.org/) applications on YARN.

type SparkJobResponseOutput

type SparkJobResponseOutput struct{ *pulumi.OutputState }

A Dataproc job for running Apache Spark (https://spark.apache.org/) applications on YARN.
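Response outputs are read through their getters, which return ordinary pulumi outputs. A small, hedged sketch; sparkJobOut is assumed to hold a SparkJobResponseOutput obtained from a job resource, and the getter return types are assumed to match those listed below:

var sparkJobOut SparkJobResponseOutput // obtained elsewhere, e.g. from a job resource's outputs

// MainClass() yields a pulumi.StringOutput; ApplyT derives a value once it resolves.
label := sparkJobOut.MainClass().ApplyT(func(c string) string {
	return "driver class: " + c
}).(pulumi.StringOutput)
_ = label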

func (SparkJobResponseOutput) ArchiveUris

Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.

func (SparkJobResponseOutput) Args

Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.

func (SparkJobResponseOutput) ElementType

func (SparkJobResponseOutput) ElementType() reflect.Type

func (SparkJobResponseOutput) FileUris

Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.

func (SparkJobResponseOutput) JarFileUris

Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.

func (SparkJobResponseOutput) LoggingConfig

Optional. The runtime log config for job execution.

func (SparkJobResponseOutput) MainClass

The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in SparkJob.jar_file_uris.

func (SparkJobResponseOutput) MainJarFileUri

func (o SparkJobResponseOutput) MainJarFileUri() pulumi.StringOutput

The HCFS URI of the jar file that contains the main class.

func (SparkJobResponseOutput) Properties

Optional. A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.

func (SparkJobResponseOutput) ToSparkJobResponseOutput

func (o SparkJobResponseOutput) ToSparkJobResponseOutput() SparkJobResponseOutput

func (SparkJobResponseOutput) ToSparkJobResponseOutputWithContext

func (o SparkJobResponseOutput) ToSparkJobResponseOutputWithContext(ctx context.Context) SparkJobResponseOutput

type SparkRBatch added in v0.12.0

type SparkRBatch struct {
	// Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris []string `pulumi:"archiveUris"`
	// Optional. The arguments to pass to the Spark driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.
	Args []string `pulumi:"args"`
	// Optional. HCFS URIs of files to be placed in the working directory of each executor.
	FileUris []string `pulumi:"fileUris"`
	// The HCFS URI of the main R file to use as the driver. Must be a .R or .r file.
	MainRFileUri string `pulumi:"mainRFileUri"`
}

A configuration for running an Apache SparkR (https://spark.apache.org/docs/latest/sparkr.html) batch workload.
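A minimal sketch; MainRFileUri is the only required field (its Args type is a plain StringInput), the URIs are placeholders, and pulumi is the standard pulumi SDK package:

sparkRBatch := SparkRBatchArgs{
	MainRFileUri: pulumi.String("gs://my-bucket/scripts/analysis.R"),
	Args:         pulumi.StringArray{pulumi.String("--input=gs://my-bucket/data/input.csv")},
}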

type SparkRBatchArgs added in v0.12.0

type SparkRBatchArgs struct {
	// Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris pulumi.StringArrayInput `pulumi:"archiveUris"`
	// Optional. The arguments to pass to the Spark driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.
	Args pulumi.StringArrayInput `pulumi:"args"`
	// Optional. HCFS URIs of files to be placed in the working directory of each executor.
	FileUris pulumi.StringArrayInput `pulumi:"fileUris"`
	// The HCFS URI of the main R file to use as the driver. Must be a .R or .r file.
	MainRFileUri pulumi.StringInput `pulumi:"mainRFileUri"`
}

A configuration for running an Apache SparkR (https://spark.apache.org/docs/latest/sparkr.html) batch workload.

func (SparkRBatchArgs) ElementType added in v0.12.0

func (SparkRBatchArgs) ElementType() reflect.Type

func (SparkRBatchArgs) ToSparkRBatchOutput added in v0.12.0

func (i SparkRBatchArgs) ToSparkRBatchOutput() SparkRBatchOutput

func (SparkRBatchArgs) ToSparkRBatchOutputWithContext added in v0.12.0

func (i SparkRBatchArgs) ToSparkRBatchOutputWithContext(ctx context.Context) SparkRBatchOutput

func (SparkRBatchArgs) ToSparkRBatchPtrOutput added in v0.12.0

func (i SparkRBatchArgs) ToSparkRBatchPtrOutput() SparkRBatchPtrOutput

func (SparkRBatchArgs) ToSparkRBatchPtrOutputWithContext added in v0.12.0

func (i SparkRBatchArgs) ToSparkRBatchPtrOutputWithContext(ctx context.Context) SparkRBatchPtrOutput

type SparkRBatchInput added in v0.12.0

type SparkRBatchInput interface {
	pulumi.Input

	ToSparkRBatchOutput() SparkRBatchOutput
	ToSparkRBatchOutputWithContext(context.Context) SparkRBatchOutput
}

SparkRBatchInput is an input type that accepts SparkRBatchArgs and SparkRBatchOutput values. You can construct a concrete instance of `SparkRBatchInput` via:

SparkRBatchArgs{...}

type SparkRBatchOutput added in v0.12.0

type SparkRBatchOutput struct{ *pulumi.OutputState }

A configuration for running an Apache SparkR (https://spark.apache.org/docs/latest/sparkr.html) batch workload.

func (SparkRBatchOutput) ArchiveUris added in v0.12.0

func (o SparkRBatchOutput) ArchiveUris() pulumi.StringArrayOutput

Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.

func (SparkRBatchOutput) Args added in v0.12.0

Optional. The arguments to pass to the Spark driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.

func (SparkRBatchOutput) ElementType added in v0.12.0

func (SparkRBatchOutput) ElementType() reflect.Type

func (SparkRBatchOutput) FileUris added in v0.12.0

Optional. HCFS URIs of files to be placed in the working directory of each executor.

func (SparkRBatchOutput) MainRFileUri added in v0.12.0

func (o SparkRBatchOutput) MainRFileUri() pulumi.StringOutput

The HCFS URI of the main R file to use as the driver. Must be a .R or .r file.

func (SparkRBatchOutput) ToSparkRBatchOutput added in v0.12.0

func (o SparkRBatchOutput) ToSparkRBatchOutput() SparkRBatchOutput

func (SparkRBatchOutput) ToSparkRBatchOutputWithContext added in v0.12.0

func (o SparkRBatchOutput) ToSparkRBatchOutputWithContext(ctx context.Context) SparkRBatchOutput

func (SparkRBatchOutput) ToSparkRBatchPtrOutput added in v0.12.0

func (o SparkRBatchOutput) ToSparkRBatchPtrOutput() SparkRBatchPtrOutput

func (SparkRBatchOutput) ToSparkRBatchPtrOutputWithContext added in v0.12.0

func (o SparkRBatchOutput) ToSparkRBatchPtrOutputWithContext(ctx context.Context) SparkRBatchPtrOutput

type SparkRBatchPtrInput added in v0.12.0

type SparkRBatchPtrInput interface {
	pulumi.Input

	ToSparkRBatchPtrOutput() SparkRBatchPtrOutput
	ToSparkRBatchPtrOutputWithContext(context.Context) SparkRBatchPtrOutput
}

SparkRBatchPtrInput is an input type that accepts SparkRBatchArgs, SparkRBatchPtr and SparkRBatchPtrOutput values. You can construct a concrete instance of `SparkRBatchPtrInput` via:

        SparkRBatchArgs{...}

or:

        nil

func SparkRBatchPtr added in v0.12.0

func SparkRBatchPtr(v *SparkRBatchArgs) SparkRBatchPtrInput

type SparkRBatchPtrOutput added in v0.12.0

type SparkRBatchPtrOutput struct{ *pulumi.OutputState }

func (SparkRBatchPtrOutput) ArchiveUris added in v0.12.0

Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.

func (SparkRBatchPtrOutput) Args added in v0.12.0

Optional. The arguments to pass to the Spark driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.

func (SparkRBatchPtrOutput) Elem added in v0.12.0

func (SparkRBatchPtrOutput) ElementType added in v0.12.0

func (SparkRBatchPtrOutput) ElementType() reflect.Type

func (SparkRBatchPtrOutput) FileUris added in v0.12.0

Optional. HCFS URIs of files to be placed in the working directory of each executor.

func (SparkRBatchPtrOutput) MainRFileUri added in v0.12.0

func (o SparkRBatchPtrOutput) MainRFileUri() pulumi.StringPtrOutput

The HCFS URI of the main R file to use as the driver. Must be a .R or .r file.

func (SparkRBatchPtrOutput) ToSparkRBatchPtrOutput added in v0.12.0

func (o SparkRBatchPtrOutput) ToSparkRBatchPtrOutput() SparkRBatchPtrOutput

func (SparkRBatchPtrOutput) ToSparkRBatchPtrOutputWithContext added in v0.12.0

func (o SparkRBatchPtrOutput) ToSparkRBatchPtrOutputWithContext(ctx context.Context) SparkRBatchPtrOutput

type SparkRBatchResponse added in v0.12.0

type SparkRBatchResponse struct {
	// Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris []string `pulumi:"archiveUris"`
	// Optional. The arguments to pass to the Spark driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.
	Args []string `pulumi:"args"`
	// Optional. HCFS URIs of files to be placed in the working directory of each executor.
	FileUris []string `pulumi:"fileUris"`
	// The HCFS URI of the main R file to use as the driver. Must be a .R or .r file.
	MainRFileUri string `pulumi:"mainRFileUri"`
}

A configuration for running an Apache SparkR (https://spark.apache.org/docs/latest/sparkr.html) batch workload.

type SparkRBatchResponseOutput added in v0.12.0

type SparkRBatchResponseOutput struct{ *pulumi.OutputState }

A configuration for running an Apache SparkR (https://spark.apache.org/docs/latest/sparkr.html) batch workload.

func (SparkRBatchResponseOutput) ArchiveUris added in v0.12.0

Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.

func (SparkRBatchResponseOutput) Args added in v0.12.0

Optional. The arguments to pass to the Spark driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.

func (SparkRBatchResponseOutput) ElementType added in v0.12.0

func (SparkRBatchResponseOutput) ElementType() reflect.Type

func (SparkRBatchResponseOutput) FileUris added in v0.12.0

Optional. HCFS URIs of files to be placed in the working directory of each executor.

func (SparkRBatchResponseOutput) MainRFileUri added in v0.12.0

The HCFS URI of the main R file to use as the driver. Must be a .R or .r file.

func (SparkRBatchResponseOutput) ToSparkRBatchResponseOutput added in v0.12.0

func (o SparkRBatchResponseOutput) ToSparkRBatchResponseOutput() SparkRBatchResponseOutput

func (SparkRBatchResponseOutput) ToSparkRBatchResponseOutputWithContext added in v0.12.0

func (o SparkRBatchResponseOutput) ToSparkRBatchResponseOutputWithContext(ctx context.Context) SparkRBatchResponseOutput

type SparkRJob

type SparkRJob struct {
	// Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris []string `pulumi:"archiveUris"`
	// Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
	Args []string `pulumi:"args"`
	// Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
	FileUris []string `pulumi:"fileUris"`
	// Optional. The runtime log config for job execution.
	LoggingConfig *LoggingConfig `pulumi:"loggingConfig"`
	// The HCFS URI of the main R file to use as the driver. Must be a .R file.
	MainRFileUri string `pulumi:"mainRFileUri"`
	// Optional. A mapping of property names to values, used to configure SparkR. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.
	Properties map[string]string `pulumi:"properties"`
}

A Dataproc job for running Apache SparkR (https://spark.apache.org/docs/latest/sparkr.html) applications on YARN.
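A hedged sketch of a SparkRJobArgs value with placeholder URIs; as with the batch variant, MainRFileUri is required, and pulumi is the standard pulumi SDK package:

sparkRJob := SparkRJobArgs{
	MainRFileUri: pulumi.String("gs://my-bucket/scripts/job.R"),
	FileUris:     pulumi.StringArray{pulumi.String("gs://my-bucket/data/lookup.csv")},
	Properties: pulumi.StringMap{
		"spark.r.command": pulumi.String("Rscript"),
	},
}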

type SparkRJobArgs

type SparkRJobArgs struct {
	// Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris pulumi.StringArrayInput `pulumi:"archiveUris"`
	// Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
	Args pulumi.StringArrayInput `pulumi:"args"`
	// Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
	FileUris pulumi.StringArrayInput `pulumi:"fileUris"`
	// Optional. The runtime log config for job execution.
	LoggingConfig LoggingConfigPtrInput `pulumi:"loggingConfig"`
	// The HCFS URI of the main R file to use as the driver. Must be a .R file.
	MainRFileUri pulumi.StringInput `pulumi:"mainRFileUri"`
	// Optional. A mapping of property names to values, used to configure SparkR. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.
	Properties pulumi.StringMapInput `pulumi:"properties"`
}

A Dataproc job for running Apache SparkR (https://spark.apache.org/docs/latest/sparkr.html) applications on YARN.

func (SparkRJobArgs) ElementType

func (SparkRJobArgs) ElementType() reflect.Type

func (SparkRJobArgs) ToSparkRJobOutput

func (i SparkRJobArgs) ToSparkRJobOutput() SparkRJobOutput

func (SparkRJobArgs) ToSparkRJobOutputWithContext

func (i SparkRJobArgs) ToSparkRJobOutputWithContext(ctx context.Context) SparkRJobOutput

func (SparkRJobArgs) ToSparkRJobPtrOutput

func (i SparkRJobArgs) ToSparkRJobPtrOutput() SparkRJobPtrOutput

func (SparkRJobArgs) ToSparkRJobPtrOutputWithContext

func (i SparkRJobArgs) ToSparkRJobPtrOutputWithContext(ctx context.Context) SparkRJobPtrOutput

type SparkRJobInput

type SparkRJobInput interface {
	pulumi.Input

	ToSparkRJobOutput() SparkRJobOutput
	ToSparkRJobOutputWithContext(context.Context) SparkRJobOutput
}

SparkRJobInput is an input type that accepts SparkRJobArgs and SparkRJobOutput values. You can construct a concrete instance of `SparkRJobInput` via:

SparkRJobArgs{...}

type SparkRJobOutput

type SparkRJobOutput struct{ *pulumi.OutputState }

A Dataproc job for running Apache SparkR (https://spark.apache.org/docs/latest/sparkr.html) applications on YARN.

func (SparkRJobOutput) ArchiveUris

func (o SparkRJobOutput) ArchiveUris() pulumi.StringArrayOutput

Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.

func (SparkRJobOutput) Args

Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.

func (SparkRJobOutput) ElementType

func (SparkRJobOutput) ElementType() reflect.Type

func (SparkRJobOutput) FileUris

Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.

func (SparkRJobOutput) LoggingConfig

func (o SparkRJobOutput) LoggingConfig() LoggingConfigPtrOutput

Optional. The runtime log config for job execution.

func (SparkRJobOutput) MainRFileUri

func (o SparkRJobOutput) MainRFileUri() pulumi.StringOutput

The HCFS URI of the main R file to use as the driver. Must be a .R file.

func (SparkRJobOutput) Properties

func (o SparkRJobOutput) Properties() pulumi.StringMapOutput

Optional. A mapping of property names to values, used to configure SparkR. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.

func (SparkRJobOutput) ToSparkRJobOutput

func (o SparkRJobOutput) ToSparkRJobOutput() SparkRJobOutput

func (SparkRJobOutput) ToSparkRJobOutputWithContext

func (o SparkRJobOutput) ToSparkRJobOutputWithContext(ctx context.Context) SparkRJobOutput

func (SparkRJobOutput) ToSparkRJobPtrOutput

func (o SparkRJobOutput) ToSparkRJobPtrOutput() SparkRJobPtrOutput

func (SparkRJobOutput) ToSparkRJobPtrOutputWithContext

func (o SparkRJobOutput) ToSparkRJobPtrOutputWithContext(ctx context.Context) SparkRJobPtrOutput

type SparkRJobPtrInput

type SparkRJobPtrInput interface {
	pulumi.Input

	ToSparkRJobPtrOutput() SparkRJobPtrOutput
	ToSparkRJobPtrOutputWithContext(context.Context) SparkRJobPtrOutput
}

SparkRJobPtrInput is an input type that accepts SparkRJobArgs, SparkRJobPtr and SparkRJobPtrOutput values. You can construct a concrete instance of `SparkRJobPtrInput` via:

        SparkRJobArgs{...}

or:

        nil

func SparkRJobPtr

func SparkRJobPtr(v *SparkRJobArgs) SparkRJobPtrInput
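
A hedged sketch of the pointer form, under the same assumed import path; the result can be assigned to any SparkRJobPtrInput field, and passing nil instead leaves the job unset, mirroring the construction options listed above. The URI is a placeholder.

package example

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// sparkRJobPtr wraps a SparkRJobArgs value as a nil-able pointer input.
var sparkRJobPtr dataproc.SparkRJobPtrInput = dataproc.SparkRJobPtr(&dataproc.SparkRJobArgs{
	MainRFileUri: pulumi.String("gs://example-bucket/jobs/analysis.R"),
})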

type SparkRJobPtrOutput

type SparkRJobPtrOutput struct{ *pulumi.OutputState }

func (SparkRJobPtrOutput) ArchiveUris

Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.

func (SparkRJobPtrOutput) Args

Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.

func (SparkRJobPtrOutput) Elem

func (SparkRJobPtrOutput) ElementType

func (SparkRJobPtrOutput) ElementType() reflect.Type

func (SparkRJobPtrOutput) FileUris

Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.

func (SparkRJobPtrOutput) LoggingConfig

func (o SparkRJobPtrOutput) LoggingConfig() LoggingConfigPtrOutput

Optional. The runtime log config for job execution.

func (SparkRJobPtrOutput) MainRFileUri

func (o SparkRJobPtrOutput) MainRFileUri() pulumi.StringPtrOutput

The HCFS URI of the main R file to use as the driver. Must be a .R file.

func (SparkRJobPtrOutput) Properties

func (o SparkRJobPtrOutput) Properties() pulumi.StringMapOutput

Optional. A mapping of property names to values, used to configure SparkR. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.

func (SparkRJobPtrOutput) ToSparkRJobPtrOutput

func (o SparkRJobPtrOutput) ToSparkRJobPtrOutput() SparkRJobPtrOutput

func (SparkRJobPtrOutput) ToSparkRJobPtrOutputWithContext

func (o SparkRJobPtrOutput) ToSparkRJobPtrOutputWithContext(ctx context.Context) SparkRJobPtrOutput

type SparkRJobResponse

type SparkRJobResponse struct {
	// Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris []string `pulumi:"archiveUris"`
	// Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
	Args []string `pulumi:"args"`
	// Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
	FileUris []string `pulumi:"fileUris"`
	// Optional. The runtime log config for job execution.
	LoggingConfig LoggingConfigResponse `pulumi:"loggingConfig"`
	// The HCFS URI of the main R file to use as the driver. Must be a .R file.
	MainRFileUri string `pulumi:"mainRFileUri"`
	// Optional. A mapping of property names to values, used to configure SparkR. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.
	Properties map[string]string `pulumi:"properties"`
}

A Dataproc job for running Apache SparkR (https://spark.apache.org/docs/latest/sparkr.html) applications on YARN.

type SparkRJobResponseOutput

type SparkRJobResponseOutput struct{ *pulumi.OutputState }

A Dataproc job for running Apache SparkR (https://spark.apache.org/docs/latest/sparkr.html) applications on YARN.

func (SparkRJobResponseOutput) ArchiveUris

Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.

func (SparkRJobResponseOutput) Args

Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.

func (SparkRJobResponseOutput) ElementType

func (SparkRJobResponseOutput) ElementType() reflect.Type

func (SparkRJobResponseOutput) FileUris

Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.

func (SparkRJobResponseOutput) LoggingConfig

Optional. The runtime log config for job execution.

func (SparkRJobResponseOutput) MainRFileUri

func (o SparkRJobResponseOutput) MainRFileUri() pulumi.StringOutput

The HCFS URI of the main R file to use as the driver. Must be a .R file.

func (SparkRJobResponseOutput) Properties

Optional. A mapping of property names to values, used to configure SparkR. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.

func (SparkRJobResponseOutput) ToSparkRJobResponseOutput

func (o SparkRJobResponseOutput) ToSparkRJobResponseOutput() SparkRJobResponseOutput

func (SparkRJobResponseOutput) ToSparkRJobResponseOutputWithContext

func (o SparkRJobResponseOutput) ToSparkRJobResponseOutputWithContext(ctx context.Context) SparkRJobResponseOutput

type SparkSqlBatch added in v0.12.0

type SparkSqlBatch struct {
	// Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris []string `pulumi:"jarFileUris"`
	// The HCFS URI of the script that contains Spark SQL queries to execute.
	QueryFileUri string `pulumi:"queryFileUri"`
	// Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";).
	QueryVariables map[string]string `pulumi:"queryVariables"`
}

A configuration for running Apache Spark SQL (https://spark.apache.org/sql/) queries as a batch workload.

type SparkSqlBatchArgs added in v0.12.0

type SparkSqlBatchArgs struct {
	// Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris pulumi.StringArrayInput `pulumi:"jarFileUris"`
	// The HCFS URI of the script that contains Spark SQL queries to execute.
	QueryFileUri pulumi.StringInput `pulumi:"queryFileUri"`
	// Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";).
	QueryVariables pulumi.StringMapInput `pulumi:"queryVariables"`
}

A configuration for running Apache Spark SQL (https://spark.apache.org/sql/) queries as a batch workload.
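
A minimal sketch of a batch configuration, under the same assumed import path; the script URI, jar URI, helper name, and variable value are placeholders:

package example

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// newSparkSqlBatch builds a SparkSqlBatchArgs value for a scripted batch workload.
func newSparkSqlBatch() dataproc.SparkSqlBatchArgs {
	return dataproc.SparkSqlBatchArgs{
		// The script containing the Spark SQL queries to run.
		QueryFileUri: pulumi.String("gs://example-bucket/sql/daily_report.sql"),
		// Optional jars added to the Spark CLASSPATH.
		JarFileUris: pulumi.StringArray{pulumi.String("gs://example-bucket/jars/udfs.jar")},
		// Substituted as if by: SET run_date="2023-11-01";
		QueryVariables: pulumi.StringMap{"run_date": pulumi.String("2023-11-01")},
	}
}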

func (SparkSqlBatchArgs) ElementType added in v0.12.0

func (SparkSqlBatchArgs) ElementType() reflect.Type

func (SparkSqlBatchArgs) ToSparkSqlBatchOutput added in v0.12.0

func (i SparkSqlBatchArgs) ToSparkSqlBatchOutput() SparkSqlBatchOutput

func (SparkSqlBatchArgs) ToSparkSqlBatchOutputWithContext added in v0.12.0

func (i SparkSqlBatchArgs) ToSparkSqlBatchOutputWithContext(ctx context.Context) SparkSqlBatchOutput

func (SparkSqlBatchArgs) ToSparkSqlBatchPtrOutput added in v0.12.0

func (i SparkSqlBatchArgs) ToSparkSqlBatchPtrOutput() SparkSqlBatchPtrOutput

func (SparkSqlBatchArgs) ToSparkSqlBatchPtrOutputWithContext added in v0.12.0

func (i SparkSqlBatchArgs) ToSparkSqlBatchPtrOutputWithContext(ctx context.Context) SparkSqlBatchPtrOutput

type SparkSqlBatchInput added in v0.12.0

type SparkSqlBatchInput interface {
	pulumi.Input

	ToSparkSqlBatchOutput() SparkSqlBatchOutput
	ToSparkSqlBatchOutputWithContext(context.Context) SparkSqlBatchOutput
}

SparkSqlBatchInput is an input type that accepts SparkSqlBatchArgs and SparkSqlBatchOutput values. You can construct a concrete instance of `SparkSqlBatchInput` via:

SparkSqlBatchArgs{...}

type SparkSqlBatchOutput added in v0.12.0

type SparkSqlBatchOutput struct{ *pulumi.OutputState }

A configuration for running Apache Spark SQL (https://spark.apache.org/sql/) queries as a batch workload.

func (SparkSqlBatchOutput) ElementType added in v0.12.0

func (SparkSqlBatchOutput) ElementType() reflect.Type

func (SparkSqlBatchOutput) JarFileUris added in v0.12.0

Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (SparkSqlBatchOutput) QueryFileUri added in v0.12.0

func (o SparkSqlBatchOutput) QueryFileUri() pulumi.StringOutput

The HCFS URI of the script that contains Spark SQL queries to execute.

func (SparkSqlBatchOutput) QueryVariables added in v0.12.0

func (o SparkSqlBatchOutput) QueryVariables() pulumi.StringMapOutput

Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";).

func (SparkSqlBatchOutput) ToSparkSqlBatchOutput added in v0.12.0

func (o SparkSqlBatchOutput) ToSparkSqlBatchOutput() SparkSqlBatchOutput

func (SparkSqlBatchOutput) ToSparkSqlBatchOutputWithContext added in v0.12.0

func (o SparkSqlBatchOutput) ToSparkSqlBatchOutputWithContext(ctx context.Context) SparkSqlBatchOutput

func (SparkSqlBatchOutput) ToSparkSqlBatchPtrOutput added in v0.12.0

func (o SparkSqlBatchOutput) ToSparkSqlBatchPtrOutput() SparkSqlBatchPtrOutput

func (SparkSqlBatchOutput) ToSparkSqlBatchPtrOutputWithContext added in v0.12.0

func (o SparkSqlBatchOutput) ToSparkSqlBatchPtrOutputWithContext(ctx context.Context) SparkSqlBatchPtrOutput

type SparkSqlBatchPtrInput added in v0.12.0

type SparkSqlBatchPtrInput interface {
	pulumi.Input

	ToSparkSqlBatchPtrOutput() SparkSqlBatchPtrOutput
	ToSparkSqlBatchPtrOutputWithContext(context.Context) SparkSqlBatchPtrOutput
}

SparkSqlBatchPtrInput is an input type that accepts SparkSqlBatchArgs, SparkSqlBatchPtr and SparkSqlBatchPtrOutput values. You can construct a concrete instance of `SparkSqlBatchPtrInput` via:

        SparkSqlBatchArgs{...}

or:

        nil

func SparkSqlBatchPtr added in v0.12.0

func SparkSqlBatchPtr(v *SparkSqlBatchArgs) SparkSqlBatchPtrInput

type SparkSqlBatchPtrOutput added in v0.12.0

type SparkSqlBatchPtrOutput struct{ *pulumi.OutputState }

func (SparkSqlBatchPtrOutput) Elem added in v0.12.0

func (SparkSqlBatchPtrOutput) ElementType added in v0.12.0

func (SparkSqlBatchPtrOutput) ElementType() reflect.Type

func (SparkSqlBatchPtrOutput) JarFileUris added in v0.12.0

Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (SparkSqlBatchPtrOutput) QueryFileUri added in v0.12.0

The HCFS URI of the script that contains Spark SQL queries to execute.

func (SparkSqlBatchPtrOutput) QueryVariables added in v0.12.0

func (o SparkSqlBatchPtrOutput) QueryVariables() pulumi.StringMapOutput

Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";).

func (SparkSqlBatchPtrOutput) ToSparkSqlBatchPtrOutput added in v0.12.0

func (o SparkSqlBatchPtrOutput) ToSparkSqlBatchPtrOutput() SparkSqlBatchPtrOutput

func (SparkSqlBatchPtrOutput) ToSparkSqlBatchPtrOutputWithContext added in v0.12.0

func (o SparkSqlBatchPtrOutput) ToSparkSqlBatchPtrOutputWithContext(ctx context.Context) SparkSqlBatchPtrOutput

type SparkSqlBatchResponse added in v0.12.0

type SparkSqlBatchResponse struct {
	// Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris []string `pulumi:"jarFileUris"`
	// The HCFS URI of the script that contains Spark SQL queries to execute.
	QueryFileUri string `pulumi:"queryFileUri"`
	// Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";).
	QueryVariables map[string]string `pulumi:"queryVariables"`
}

A configuration for running Apache Spark SQL (https://spark.apache.org/sql/) queries as a batch workload.

type SparkSqlBatchResponseOutput added in v0.12.0

type SparkSqlBatchResponseOutput struct{ *pulumi.OutputState }

A configuration for running Apache Spark SQL (https://spark.apache.org/sql/) queries as a batch workload.

func (SparkSqlBatchResponseOutput) ElementType added in v0.12.0

func (SparkSqlBatchResponseOutput) JarFileUris added in v0.12.0

Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (SparkSqlBatchResponseOutput) QueryFileUri added in v0.12.0

The HCFS URI of the script that contains Spark SQL queries to execute.

func (SparkSqlBatchResponseOutput) QueryVariables added in v0.12.0

Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";).

func (SparkSqlBatchResponseOutput) ToSparkSqlBatchResponseOutput added in v0.12.0

func (o SparkSqlBatchResponseOutput) ToSparkSqlBatchResponseOutput() SparkSqlBatchResponseOutput

func (SparkSqlBatchResponseOutput) ToSparkSqlBatchResponseOutputWithContext added in v0.12.0

func (o SparkSqlBatchResponseOutput) ToSparkSqlBatchResponseOutputWithContext(ctx context.Context) SparkSqlBatchResponseOutput

type SparkSqlJob

type SparkSqlJob struct {
	// Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris []string `pulumi:"jarFileUris"`
	// Optional. The runtime log config for job execution.
	LoggingConfig *LoggingConfig `pulumi:"loggingConfig"`
	// Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API might be overwritten.
	Properties map[string]string `pulumi:"properties"`
	// The HCFS URI of the script that contains SQL queries.
	QueryFileUri *string `pulumi:"queryFileUri"`
	// A list of queries.
	QueryList *QueryList `pulumi:"queryList"`
	// Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";).
	ScriptVariables map[string]string `pulumi:"scriptVariables"`
}

A Dataproc job for running Apache Spark SQL (https://spark.apache.org/sql/) queries.

type SparkSqlJobArgs

type SparkSqlJobArgs struct {
	// Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris pulumi.StringArrayInput `pulumi:"jarFileUris"`
	// Optional. The runtime log config for job execution.
	LoggingConfig LoggingConfigPtrInput `pulumi:"loggingConfig"`
	// Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API might be overwritten.
	Properties pulumi.StringMapInput `pulumi:"properties"`
	// The HCFS URI of the script that contains SQL queries.
	QueryFileUri pulumi.StringPtrInput `pulumi:"queryFileUri"`
	// A list of queries.
	QueryList QueryListPtrInput `pulumi:"queryList"`
	// Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";).
	ScriptVariables pulumi.StringMapInput `pulumi:"scriptVariables"`
}

A Dataproc job for running Apache Spark SQL (https://spark.apache.org/sql/) queries.
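
A minimal sketch that points the job at a query script rather than an inline QueryList; the import path, helper name, URIs, and values are assumptions:

package example

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// newSparkSqlJob builds a SparkSqlJobArgs value; QueryList could be set instead of QueryFileUri.
func newSparkSqlJob() dataproc.SparkSqlJobArgs {
	return dataproc.SparkSqlJobArgs{
		// The script containing the SQL queries.
		QueryFileUri: pulumi.StringPtr("gs://example-bucket/sql/transform.sql"),
		// Optional jars added to the Spark CLASSPATH.
		JarFileUris: pulumi.StringArray{pulumi.String("gs://example-bucket/jars/serde.jar")},
		// Substituted as if by: SET env="staging";
		ScriptVariables: pulumi.StringMap{"env": pulumi.String("staging")},
		// SparkConf properties for the job.
		Properties: pulumi.StringMap{"spark.sql.shuffle.partitions": pulumi.String("200")},
	}
}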

func (SparkSqlJobArgs) ElementType

func (SparkSqlJobArgs) ElementType() reflect.Type

func (SparkSqlJobArgs) ToSparkSqlJobOutput

func (i SparkSqlJobArgs) ToSparkSqlJobOutput() SparkSqlJobOutput

func (SparkSqlJobArgs) ToSparkSqlJobOutputWithContext

func (i SparkSqlJobArgs) ToSparkSqlJobOutputWithContext(ctx context.Context) SparkSqlJobOutput

func (SparkSqlJobArgs) ToSparkSqlJobPtrOutput

func (i SparkSqlJobArgs) ToSparkSqlJobPtrOutput() SparkSqlJobPtrOutput

func (SparkSqlJobArgs) ToSparkSqlJobPtrOutputWithContext

func (i SparkSqlJobArgs) ToSparkSqlJobPtrOutputWithContext(ctx context.Context) SparkSqlJobPtrOutput

type SparkSqlJobInput

type SparkSqlJobInput interface {
	pulumi.Input

	ToSparkSqlJobOutput() SparkSqlJobOutput
	ToSparkSqlJobOutputWithContext(context.Context) SparkSqlJobOutput
}

SparkSqlJobInput is an input type that accepts SparkSqlJobArgs and SparkSqlJobOutput values. You can construct a concrete instance of `SparkSqlJobInput` via:

SparkSqlJobArgs{...}

type SparkSqlJobOutput

type SparkSqlJobOutput struct{ *pulumi.OutputState }

A Dataproc job for running Apache Spark SQL (https://spark.apache.org/sql/) queries.

func (SparkSqlJobOutput) ElementType

func (SparkSqlJobOutput) ElementType() reflect.Type

func (SparkSqlJobOutput) JarFileUris

func (o SparkSqlJobOutput) JarFileUris() pulumi.StringArrayOutput

Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (SparkSqlJobOutput) LoggingConfig

func (o SparkSqlJobOutput) LoggingConfig() LoggingConfigPtrOutput

Optional. The runtime log config for job execution.

func (SparkSqlJobOutput) Properties

func (o SparkSqlJobOutput) Properties() pulumi.StringMapOutput

Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API might be overwritten.

func (SparkSqlJobOutput) QueryFileUri

func (o SparkSqlJobOutput) QueryFileUri() pulumi.StringPtrOutput

The HCFS URI of the script that contains SQL queries.

func (SparkSqlJobOutput) QueryList

func (o SparkSqlJobOutput) QueryList() QueryListPtrOutput

A list of queries.

func (SparkSqlJobOutput) ScriptVariables

func (o SparkSqlJobOutput) ScriptVariables() pulumi.StringMapOutput

Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";).

func (SparkSqlJobOutput) ToSparkSqlJobOutput

func (o SparkSqlJobOutput) ToSparkSqlJobOutput() SparkSqlJobOutput

func (SparkSqlJobOutput) ToSparkSqlJobOutputWithContext

func (o SparkSqlJobOutput) ToSparkSqlJobOutputWithContext(ctx context.Context) SparkSqlJobOutput

func (SparkSqlJobOutput) ToSparkSqlJobPtrOutput

func (o SparkSqlJobOutput) ToSparkSqlJobPtrOutput() SparkSqlJobPtrOutput

func (SparkSqlJobOutput) ToSparkSqlJobPtrOutputWithContext

func (o SparkSqlJobOutput) ToSparkSqlJobPtrOutputWithContext(ctx context.Context) SparkSqlJobPtrOutput

type SparkSqlJobPtrInput

type SparkSqlJobPtrInput interface {
	pulumi.Input

	ToSparkSqlJobPtrOutput() SparkSqlJobPtrOutput
	ToSparkSqlJobPtrOutputWithContext(context.Context) SparkSqlJobPtrOutput
}

SparkSqlJobPtrInput is an input type that accepts SparkSqlJobArgs, SparkSqlJobPtr and SparkSqlJobPtrOutput values. You can construct a concrete instance of `SparkSqlJobPtrInput` via:

        SparkSqlJobArgs{...}

or:

        nil

func SparkSqlJobPtr

func SparkSqlJobPtr(v *SparkSqlJobArgs) SparkSqlJobPtrInput

type SparkSqlJobPtrOutput

type SparkSqlJobPtrOutput struct{ *pulumi.OutputState }

func (SparkSqlJobPtrOutput) Elem

func (SparkSqlJobPtrOutput) ElementType

func (SparkSqlJobPtrOutput) ElementType() reflect.Type

func (SparkSqlJobPtrOutput) JarFileUris

Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (SparkSqlJobPtrOutput) LoggingConfig

func (o SparkSqlJobPtrOutput) LoggingConfig() LoggingConfigPtrOutput

Optional. The runtime log config for job execution.

func (SparkSqlJobPtrOutput) Properties

Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API might be overwritten.

func (SparkSqlJobPtrOutput) QueryFileUri

func (o SparkSqlJobPtrOutput) QueryFileUri() pulumi.StringPtrOutput

The HCFS URI of the script that contains SQL queries.

func (SparkSqlJobPtrOutput) QueryList

A list of queries.

func (SparkSqlJobPtrOutput) ScriptVariables

func (o SparkSqlJobPtrOutput) ScriptVariables() pulumi.StringMapOutput

Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";).

func (SparkSqlJobPtrOutput) ToSparkSqlJobPtrOutput

func (o SparkSqlJobPtrOutput) ToSparkSqlJobPtrOutput() SparkSqlJobPtrOutput

func (SparkSqlJobPtrOutput) ToSparkSqlJobPtrOutputWithContext

func (o SparkSqlJobPtrOutput) ToSparkSqlJobPtrOutputWithContext(ctx context.Context) SparkSqlJobPtrOutput

type SparkSqlJobResponse

type SparkSqlJobResponse struct {
	// Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris []string `pulumi:"jarFileUris"`
	// Optional. The runtime log config for job execution.
	LoggingConfig LoggingConfigResponse `pulumi:"loggingConfig"`
	// Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API might be overwritten.
	Properties map[string]string `pulumi:"properties"`
	// The HCFS URI of the script that contains SQL queries.
	QueryFileUri string `pulumi:"queryFileUri"`
	// A list of queries.
	QueryList QueryListResponse `pulumi:"queryList"`
	// Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";).
	ScriptVariables map[string]string `pulumi:"scriptVariables"`
}

A Dataproc job for running Apache Spark SQL (https://spark.apache.org/sql/) queries.

type SparkSqlJobResponseOutput

type SparkSqlJobResponseOutput struct{ *pulumi.OutputState }

A Dataproc job for running Apache Spark SQL (https://spark.apache.org/sql/) queries.

func (SparkSqlJobResponseOutput) ElementType

func (SparkSqlJobResponseOutput) ElementType() reflect.Type

func (SparkSqlJobResponseOutput) JarFileUris

Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (SparkSqlJobResponseOutput) LoggingConfig

Optional. The runtime log config for job execution.

func (SparkSqlJobResponseOutput) Properties

Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API might be overwritten.

func (SparkSqlJobResponseOutput) QueryFileUri

The HCFS URI of the script that contains SQL queries.

func (SparkSqlJobResponseOutput) QueryList

A list of queries.

func (SparkSqlJobResponseOutput) ScriptVariables

func (o SparkSqlJobResponseOutput) ScriptVariables() pulumi.StringMapOutput

Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";).

func (SparkSqlJobResponseOutput) ToSparkSqlJobResponseOutput

func (o SparkSqlJobResponseOutput) ToSparkSqlJobResponseOutput() SparkSqlJobResponseOutput

func (SparkSqlJobResponseOutput) ToSparkSqlJobResponseOutputWithContext

func (o SparkSqlJobResponseOutput) ToSparkSqlJobResponseOutputWithContext(ctx context.Context) SparkSqlJobResponseOutput

type SparkStandaloneAutoscalingConfig added in v0.9.0

type SparkStandaloneAutoscalingConfig struct {
	// Timeout for graceful decommissioning of Spark workers. Specifies the duration to wait for Spark workers to complete Spark decommissioning tasks before forcefully removing them. Only applicable to downscaling operations. Bounds: 0s, 1d.
	GracefulDecommissionTimeout string `pulumi:"gracefulDecommissionTimeout"`
	// Optional. Remove only idle workers when scaling down the cluster.
	RemoveOnlyIdleWorkers *bool `pulumi:"removeOnlyIdleWorkers"`
	// Fraction of required executors to remove from Spark Standalone clusters. A scale-down factor of 1.0 will result in scaling down so that there are no more executors for the Spark Job (more aggressive scaling). A scale-down factor closer to 0 will result in a smaller magnitude of scaling down (less aggressive scaling). Bounds: 0.0, 1.0.
	ScaleDownFactor float64 `pulumi:"scaleDownFactor"`
	// Optional. Minimum scale-down threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2 worker scale-down for the cluster to scale. A threshold of 0 means the autoscaler will scale down on any recommended change.Bounds: 0.0, 1.0. Default: 0.0.
	ScaleDownMinWorkerFraction *float64 `pulumi:"scaleDownMinWorkerFraction"`
	// Fraction of required workers to add to Spark Standalone clusters. A scale-up factor of 1.0 will result in scaling up so that there are no more required workers for the Spark Job (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling).Bounds: 0.0, 1.0.
	ScaleUpFactor float64 `pulumi:"scaleUpFactor"`
	// Optional. Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change.Bounds: 0.0, 1.0. Default: 0.0.
	ScaleUpMinWorkerFraction *float64 `pulumi:"scaleUpMinWorkerFraction"`
}

Basic autoscaling configurations for Spark Standalone.

type SparkStandaloneAutoscalingConfigArgs added in v0.9.0

type SparkStandaloneAutoscalingConfigArgs struct {
	// Timeout for graceful decommissioning of Spark workers. Specifies the duration to wait for Spark workers to complete Spark decommissioning tasks before forcefully removing them. Only applicable to downscaling operations. Bounds: 0s, 1d.
	GracefulDecommissionTimeout pulumi.StringInput `pulumi:"gracefulDecommissionTimeout"`
	// Optional. Remove only idle workers when scaling down the cluster.
	RemoveOnlyIdleWorkers pulumi.BoolPtrInput `pulumi:"removeOnlyIdleWorkers"`
	// Fraction of required executors to remove from Spark Standalone clusters. A scale-down factor of 1.0 will result in scaling down so that there are no more executors for the Spark Job (more aggressive scaling). A scale-down factor closer to 0 will result in a smaller magnitude of scaling down (less aggressive scaling). Bounds: 0.0, 1.0.
	ScaleDownFactor pulumi.Float64Input `pulumi:"scaleDownFactor"`
	// Optional. Minimum scale-down threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2 worker scale-down for the cluster to scale. A threshold of 0 means the autoscaler will scale down on any recommended change.Bounds: 0.0, 1.0. Default: 0.0.
	ScaleDownMinWorkerFraction pulumi.Float64PtrInput `pulumi:"scaleDownMinWorkerFraction"`
	// Fraction of required workers to add to Spark Standalone clusters. A scale-up factor of 1.0 will result in scaling up so that there are no more required workers for the Spark Job (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling).Bounds: 0.0, 1.0.
	ScaleUpFactor pulumi.Float64Input `pulumi:"scaleUpFactor"`
	// Optional. Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change.Bounds: 0.0, 1.0. Default: 0.0.
	ScaleUpMinWorkerFraction pulumi.Float64PtrInput `pulumi:"scaleUpMinWorkerFraction"`
}

Basic autoscaling configurations for Spark Standalone.
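
A minimal sketch of an autoscaling configuration, under the same assumed import path; the helper name, timeout, and fractions are illustrative values, not recommendations:

package example

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// newStandaloneAutoscaling builds a SparkStandaloneAutoscalingConfigArgs value.
func newStandaloneAutoscaling() dataproc.SparkStandaloneAutoscalingConfigArgs {
	return dataproc.SparkStandaloneAutoscalingConfigArgs{
		// Wait up to one hour for workers to finish decommissioning before forced removal.
		GracefulDecommissionTimeout: pulumi.String("3600s"),
		// Scale up aggressively, scale down more cautiously.
		ScaleUpFactor:   pulumi.Float64(1.0),
		ScaleDownFactor: pulumi.Float64(0.5),
		// Ignore scale-up recommendations smaller than 10% of the cluster.
		ScaleUpMinWorkerFraction: pulumi.Float64Ptr(0.1),
		// Only remove workers that are idle when scaling down.
		RemoveOnlyIdleWorkers: pulumi.BoolPtr(true),
	}
}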

func (SparkStandaloneAutoscalingConfigArgs) ElementType added in v0.9.0

func (SparkStandaloneAutoscalingConfigArgs) ToSparkStandaloneAutoscalingConfigOutput added in v0.9.0

func (i SparkStandaloneAutoscalingConfigArgs) ToSparkStandaloneAutoscalingConfigOutput() SparkStandaloneAutoscalingConfigOutput

func (SparkStandaloneAutoscalingConfigArgs) ToSparkStandaloneAutoscalingConfigOutputWithContext added in v0.9.0

func (i SparkStandaloneAutoscalingConfigArgs) ToSparkStandaloneAutoscalingConfigOutputWithContext(ctx context.Context) SparkStandaloneAutoscalingConfigOutput

func (SparkStandaloneAutoscalingConfigArgs) ToSparkStandaloneAutoscalingConfigPtrOutput added in v0.9.0

func (i SparkStandaloneAutoscalingConfigArgs) ToSparkStandaloneAutoscalingConfigPtrOutput() SparkStandaloneAutoscalingConfigPtrOutput

func (SparkStandaloneAutoscalingConfigArgs) ToSparkStandaloneAutoscalingConfigPtrOutputWithContext added in v0.9.0

func (i SparkStandaloneAutoscalingConfigArgs) ToSparkStandaloneAutoscalingConfigPtrOutputWithContext(ctx context.Context) SparkStandaloneAutoscalingConfigPtrOutput

type SparkStandaloneAutoscalingConfigInput added in v0.9.0

type SparkStandaloneAutoscalingConfigInput interface {
	pulumi.Input

	ToSparkStandaloneAutoscalingConfigOutput() SparkStandaloneAutoscalingConfigOutput
	ToSparkStandaloneAutoscalingConfigOutputWithContext(context.Context) SparkStandaloneAutoscalingConfigOutput
}

SparkStandaloneAutoscalingConfigInput is an input type that accepts SparkStandaloneAutoscalingConfigArgs and SparkStandaloneAutoscalingConfigOutput values. You can construct a concrete instance of `SparkStandaloneAutoscalingConfigInput` via:

SparkStandaloneAutoscalingConfigArgs{...}

type SparkStandaloneAutoscalingConfigOutput added in v0.9.0

type SparkStandaloneAutoscalingConfigOutput struct{ *pulumi.OutputState }

Basic autoscaling configurations for Spark Standalone.

func (SparkStandaloneAutoscalingConfigOutput) ElementType added in v0.9.0

func (SparkStandaloneAutoscalingConfigOutput) GracefulDecommissionTimeout added in v0.9.0

func (o SparkStandaloneAutoscalingConfigOutput) GracefulDecommissionTimeout() pulumi.StringOutput

Timeout for graceful decommissioning of Spark workers. Specifies the duration to wait for Spark workers to complete Spark decommissioning tasks before forcefully removing them. Only applicable to downscaling operations. Bounds: 0s, 1d.

func (SparkStandaloneAutoscalingConfigOutput) RemoveOnlyIdleWorkers added in v0.32.0

Optional. Remove only idle workers when scaling down the cluster.

func (SparkStandaloneAutoscalingConfigOutput) ScaleDownFactor added in v0.9.0

Fraction of required executors to remove from Spark Standalone clusters. A scale-down factor of 1.0 will result in scaling down so that there are no more executors for the Spark Job (more aggressive scaling). A scale-down factor closer to 0 will result in a smaller magnitude of scaling down (less aggressive scaling). Bounds: 0.0, 1.0.

func (SparkStandaloneAutoscalingConfigOutput) ScaleDownMinWorkerFraction added in v0.9.0

func (o SparkStandaloneAutoscalingConfigOutput) ScaleDownMinWorkerFraction() pulumi.Float64PtrOutput

Optional. Minimum scale-down threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2 worker scale-down for the cluster to scale. A threshold of 0 means the autoscaler will scale down on any recommended change.Bounds: 0.0, 1.0. Default: 0.0.

func (SparkStandaloneAutoscalingConfigOutput) ScaleUpFactor added in v0.9.0

Fraction of required workers to add to Spark Standalone clusters. A scale-up factor of 1.0 will result in scaling up so that there are no more required workers for the Spark Job (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling).Bounds: 0.0, 1.0.

func (SparkStandaloneAutoscalingConfigOutput) ScaleUpMinWorkerFraction added in v0.9.0

func (o SparkStandaloneAutoscalingConfigOutput) ScaleUpMinWorkerFraction() pulumi.Float64PtrOutput

Optional. Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change.Bounds: 0.0, 1.0. Default: 0.0.

func (SparkStandaloneAutoscalingConfigOutput) ToSparkStandaloneAutoscalingConfigOutput added in v0.9.0

func (o SparkStandaloneAutoscalingConfigOutput) ToSparkStandaloneAutoscalingConfigOutput() SparkStandaloneAutoscalingConfigOutput

func (SparkStandaloneAutoscalingConfigOutput) ToSparkStandaloneAutoscalingConfigOutputWithContext added in v0.9.0

func (o SparkStandaloneAutoscalingConfigOutput) ToSparkStandaloneAutoscalingConfigOutputWithContext(ctx context.Context) SparkStandaloneAutoscalingConfigOutput

func (SparkStandaloneAutoscalingConfigOutput) ToSparkStandaloneAutoscalingConfigPtrOutput added in v0.9.0

func (o SparkStandaloneAutoscalingConfigOutput) ToSparkStandaloneAutoscalingConfigPtrOutput() SparkStandaloneAutoscalingConfigPtrOutput

func (SparkStandaloneAutoscalingConfigOutput) ToSparkStandaloneAutoscalingConfigPtrOutputWithContext added in v0.9.0

func (o SparkStandaloneAutoscalingConfigOutput) ToSparkStandaloneAutoscalingConfigPtrOutputWithContext(ctx context.Context) SparkStandaloneAutoscalingConfigPtrOutput

type SparkStandaloneAutoscalingConfigPtrInput added in v0.9.0

type SparkStandaloneAutoscalingConfigPtrInput interface {
	pulumi.Input

	ToSparkStandaloneAutoscalingConfigPtrOutput() SparkStandaloneAutoscalingConfigPtrOutput
	ToSparkStandaloneAutoscalingConfigPtrOutputWithContext(context.Context) SparkStandaloneAutoscalingConfigPtrOutput
}

SparkStandaloneAutoscalingConfigPtrInput is an input type that accepts SparkStandaloneAutoscalingConfigArgs, SparkStandaloneAutoscalingConfigPtr and SparkStandaloneAutoscalingConfigPtrOutput values. You can construct a concrete instance of `SparkStandaloneAutoscalingConfigPtrInput` via:

        SparkStandaloneAutoscalingConfigArgs{...}

or:

        nil

type SparkStandaloneAutoscalingConfigPtrOutput added in v0.9.0

type SparkStandaloneAutoscalingConfigPtrOutput struct{ *pulumi.OutputState }

func (SparkStandaloneAutoscalingConfigPtrOutput) Elem added in v0.9.0

func (SparkStandaloneAutoscalingConfigPtrOutput) ElementType added in v0.9.0

func (SparkStandaloneAutoscalingConfigPtrOutput) GracefulDecommissionTimeout added in v0.9.0

func (o SparkStandaloneAutoscalingConfigPtrOutput) GracefulDecommissionTimeout() pulumi.StringPtrOutput

Timeout for graceful decommissioning of Spark workers. Specifies the duration to wait for Spark workers to complete Spark decommissioning tasks before forcefully removing them. Only applicable to downscaling operations. Bounds: 0s, 1d.

func (SparkStandaloneAutoscalingConfigPtrOutput) RemoveOnlyIdleWorkers added in v0.32.0

Optional. Remove only idle workers when scaling down the cluster.

func (SparkStandaloneAutoscalingConfigPtrOutput) ScaleDownFactor added in v0.9.0

Fraction of required executors to remove from Spark Standalone clusters. A scale-down factor of 1.0 will result in scaling down so that there are no more executors for the Spark Job (more aggressive scaling). A scale-down factor closer to 0 will result in a smaller magnitude of scaling down (less aggressive scaling). Bounds: 0.0, 1.0.

func (SparkStandaloneAutoscalingConfigPtrOutput) ScaleDownMinWorkerFraction added in v0.9.0

func (o SparkStandaloneAutoscalingConfigPtrOutput) ScaleDownMinWorkerFraction() pulumi.Float64PtrOutput

Optional. Minimum scale-down threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2 worker scale-down for the cluster to scale. A threshold of 0 means the autoscaler will scale down on any recommended change.Bounds: 0.0, 1.0. Default: 0.0.

func (SparkStandaloneAutoscalingConfigPtrOutput) ScaleUpFactor added in v0.9.0

Fraction of required workers to add to Spark Standalone clusters. A scale-up factor of 1.0 will result in scaling up so that there are no more required workers for the Spark Job (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling).Bounds: 0.0, 1.0.

func (SparkStandaloneAutoscalingConfigPtrOutput) ScaleUpMinWorkerFraction added in v0.9.0

Optional. Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change.Bounds: 0.0, 1.0. Default: 0.0.

func (SparkStandaloneAutoscalingConfigPtrOutput) ToSparkStandaloneAutoscalingConfigPtrOutput added in v0.9.0

func (o SparkStandaloneAutoscalingConfigPtrOutput) ToSparkStandaloneAutoscalingConfigPtrOutput() SparkStandaloneAutoscalingConfigPtrOutput

func (SparkStandaloneAutoscalingConfigPtrOutput) ToSparkStandaloneAutoscalingConfigPtrOutputWithContext added in v0.9.0

func (o SparkStandaloneAutoscalingConfigPtrOutput) ToSparkStandaloneAutoscalingConfigPtrOutputWithContext(ctx context.Context) SparkStandaloneAutoscalingConfigPtrOutput

type SparkStandaloneAutoscalingConfigResponse added in v0.9.0

type SparkStandaloneAutoscalingConfigResponse struct {
	// Timeout for graceful decommissioning of Spark workers. Specifies the duration to wait for Spark workers to complete Spark decommissioning tasks before forcefully removing them. Only applicable to downscaling operations. Bounds: 0s, 1d.
	GracefulDecommissionTimeout string `pulumi:"gracefulDecommissionTimeout"`
	// Optional. Remove only idle workers when scaling down the cluster.
	RemoveOnlyIdleWorkers bool `pulumi:"removeOnlyIdleWorkers"`
	// Fraction of required executors to remove from Spark Standalone clusters. A scale-down factor of 1.0 will result in scaling down so that there are no more executors for the Spark Job (more aggressive scaling). A scale-down factor closer to 0 will result in a smaller magnitude of scaling down (less aggressive scaling). Bounds: 0.0, 1.0.
	ScaleDownFactor float64 `pulumi:"scaleDownFactor"`
	// Optional. Minimum scale-down threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2 worker scale-down for the cluster to scale. A threshold of 0 means the autoscaler will scale down on any recommended change.Bounds: 0.0, 1.0. Default: 0.0.
	ScaleDownMinWorkerFraction float64 `pulumi:"scaleDownMinWorkerFraction"`
	// Fraction of required workers to add to Spark Standalone clusters. A scale-up factor of 1.0 will result in scaling up so that there are no more required workers for the Spark Job (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling).Bounds: 0.0, 1.0.
	ScaleUpFactor float64 `pulumi:"scaleUpFactor"`
	// Optional. Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change.Bounds: 0.0, 1.0. Default: 0.0.
	ScaleUpMinWorkerFraction float64 `pulumi:"scaleUpMinWorkerFraction"`
}

Basic autoscaling configurations for Spark Standalone.

type SparkStandaloneAutoscalingConfigResponseOutput added in v0.9.0

type SparkStandaloneAutoscalingConfigResponseOutput struct{ *pulumi.OutputState }

Basic autoscaling configurations for Spark Standalone.

func (SparkStandaloneAutoscalingConfigResponseOutput) ElementType added in v0.9.0

func (SparkStandaloneAutoscalingConfigResponseOutput) GracefulDecommissionTimeout added in v0.9.0

func (o SparkStandaloneAutoscalingConfigResponseOutput) GracefulDecommissionTimeout() pulumi.StringOutput

Timeout for graceful decommissioning of Spark workers. Specifies the duration to wait for Spark workers to complete Spark decommissioning tasks before forcefully removing them. Only applicable to downscaling operations. Bounds: 0s, 1d.

func (SparkStandaloneAutoscalingConfigResponseOutput) RemoveOnlyIdleWorkers added in v0.32.0

Optional. Remove only idle workers when scaling down the cluster.

func (SparkStandaloneAutoscalingConfigResponseOutput) ScaleDownFactor added in v0.9.0

Fraction of required executors to remove from Spark Standalone clusters. A scale-down factor of 1.0 will result in scaling down so that there are no more executors for the Spark Job (more aggressive scaling). A scale-down factor closer to 0 will result in a smaller magnitude of scaling down (less aggressive scaling). Bounds: 0.0, 1.0.

func (SparkStandaloneAutoscalingConfigResponseOutput) ScaleDownMinWorkerFraction added in v0.9.0

Optional. Minimum scale-down threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2 worker scale-down for the cluster to scale. A threshold of 0 means the autoscaler will scale down on any recommended change.Bounds: 0.0, 1.0. Default: 0.0.

func (SparkStandaloneAutoscalingConfigResponseOutput) ScaleUpFactor added in v0.9.0

Fraction of required workers to add to Spark Standalone clusters. A scale-up factor of 1.0 will result in scaling up so that there are no more required workers for the Spark Job (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling).Bounds: 0.0, 1.0.

func (SparkStandaloneAutoscalingConfigResponseOutput) ScaleUpMinWorkerFraction added in v0.9.0

Optional. Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change.Bounds: 0.0, 1.0. Default: 0.0.

func (SparkStandaloneAutoscalingConfigResponseOutput) ToSparkStandaloneAutoscalingConfigResponseOutput added in v0.9.0

func (o SparkStandaloneAutoscalingConfigResponseOutput) ToSparkStandaloneAutoscalingConfigResponseOutput() SparkStandaloneAutoscalingConfigResponseOutput

func (SparkStandaloneAutoscalingConfigResponseOutput) ToSparkStandaloneAutoscalingConfigResponseOutputWithContext added in v0.9.0

func (o SparkStandaloneAutoscalingConfigResponseOutput) ToSparkStandaloneAutoscalingConfigResponseOutputWithContext(ctx context.Context) SparkStandaloneAutoscalingConfigResponseOutput

type StartupConfig added in v0.32.0

type StartupConfig struct {
	// Optional. The config setting that makes cluster creation/update succeed only after required_registration_fraction of instances are up and running. This configuration currently applies only to secondary workers. The cluster will fail if required_registration_fraction of instances are not available. This includes instance creation, agent registration, and service registration (if enabled).
	RequiredRegistrationFraction *float64 `pulumi:"requiredRegistrationFraction"`
}

Configuration to handle the startup of instances during cluster create and update process.

type StartupConfigArgs added in v0.32.0

type StartupConfigArgs struct {
	// Optional. The config setting that makes cluster creation/update succeed only after required_registration_fraction of instances are up and running. This configuration currently applies only to secondary workers. The cluster will fail if required_registration_fraction of instances are not available. This includes instance creation, agent registration, and service registration (if enabled).
	RequiredRegistrationFraction pulumi.Float64PtrInput `pulumi:"requiredRegistrationFraction"`
}

Configuration to handle the startup of instances during cluster create and update process.
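
A minimal sketch, again under the assumed import path; the helper name and the 0.9 fraction are placeholders:

package example

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// newStartupConfig requires 90% of the affected instances to be up and registered
// before cluster creation or update is considered successful.
func newStartupConfig() dataproc.StartupConfigArgs {
	return dataproc.StartupConfigArgs{
		RequiredRegistrationFraction: pulumi.Float64Ptr(0.9),
	}
}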

func (StartupConfigArgs) ElementType added in v0.32.0

func (StartupConfigArgs) ElementType() reflect.Type

func (StartupConfigArgs) ToStartupConfigOutput added in v0.32.0

func (i StartupConfigArgs) ToStartupConfigOutput() StartupConfigOutput

func (StartupConfigArgs) ToStartupConfigOutputWithContext added in v0.32.0

func (i StartupConfigArgs) ToStartupConfigOutputWithContext(ctx context.Context) StartupConfigOutput

func (StartupConfigArgs) ToStartupConfigPtrOutput added in v0.32.0

func (i StartupConfigArgs) ToStartupConfigPtrOutput() StartupConfigPtrOutput

func (StartupConfigArgs) ToStartupConfigPtrOutputWithContext added in v0.32.0

func (i StartupConfigArgs) ToStartupConfigPtrOutputWithContext(ctx context.Context) StartupConfigPtrOutput

type StartupConfigInput added in v0.32.0

type StartupConfigInput interface {
	pulumi.Input

	ToStartupConfigOutput() StartupConfigOutput
	ToStartupConfigOutputWithContext(context.Context) StartupConfigOutput
}

StartupConfigInput is an input type that accepts StartupConfigArgs and StartupConfigOutput values. You can construct a concrete instance of `StartupConfigInput` via:

StartupConfigArgs{...}

type StartupConfigOutput added in v0.32.0

type StartupConfigOutput struct{ *pulumi.OutputState }

Configuration to handle the startup of instances during cluster create and update process.

func (StartupConfigOutput) ElementType added in v0.32.0

func (StartupConfigOutput) ElementType() reflect.Type

func (StartupConfigOutput) RequiredRegistrationFraction added in v0.32.0

func (o StartupConfigOutput) RequiredRegistrationFraction() pulumi.Float64PtrOutput

Optional. The config setting that makes cluster creation/update succeed only after required_registration_fraction of instances are up and running. This configuration currently applies only to secondary workers. The cluster will fail if required_registration_fraction of instances are not available. This includes instance creation, agent registration, and service registration (if enabled).

func (StartupConfigOutput) ToStartupConfigOutput added in v0.32.0

func (o StartupConfigOutput) ToStartupConfigOutput() StartupConfigOutput

func (StartupConfigOutput) ToStartupConfigOutputWithContext added in v0.32.0

func (o StartupConfigOutput) ToStartupConfigOutputWithContext(ctx context.Context) StartupConfigOutput

func (StartupConfigOutput) ToStartupConfigPtrOutput added in v0.32.0

func (o StartupConfigOutput) ToStartupConfigPtrOutput() StartupConfigPtrOutput

func (StartupConfigOutput) ToStartupConfigPtrOutputWithContext added in v0.32.0

func (o StartupConfigOutput) ToStartupConfigPtrOutputWithContext(ctx context.Context) StartupConfigPtrOutput

type StartupConfigPtrInput added in v0.32.0

type StartupConfigPtrInput interface {
	pulumi.Input

	ToStartupConfigPtrOutput() StartupConfigPtrOutput
	ToStartupConfigPtrOutputWithContext(context.Context) StartupConfigPtrOutput
}

StartupConfigPtrInput is an input type that accepts StartupConfigArgs, StartupConfigPtr and StartupConfigPtrOutput values. You can construct a concrete instance of `StartupConfigPtrInput` via:

        StartupConfigArgs{...}

or:

        nil

func StartupConfigPtr added in v0.32.0

func StartupConfigPtr(v *StartupConfigArgs) StartupConfigPtrInput

type StartupConfigPtrOutput added in v0.32.0

type StartupConfigPtrOutput struct{ *pulumi.OutputState }

func (StartupConfigPtrOutput) Elem added in v0.32.0

func (StartupConfigPtrOutput) ElementType added in v0.32.0

func (StartupConfigPtrOutput) ElementType() reflect.Type

func (StartupConfigPtrOutput) RequiredRegistrationFraction added in v0.32.0

func (o StartupConfigPtrOutput) RequiredRegistrationFraction() pulumi.Float64PtrOutput

Optional. The config setting that makes cluster creation/update succeed only after required_registration_fraction of instances are up and running. This configuration currently applies only to secondary workers. The cluster will fail if required_registration_fraction of instances are not available. This includes instance creation, agent registration, and service registration (if enabled).

func (StartupConfigPtrOutput) ToStartupConfigPtrOutput added in v0.32.0

func (o StartupConfigPtrOutput) ToStartupConfigPtrOutput() StartupConfigPtrOutput

func (StartupConfigPtrOutput) ToStartupConfigPtrOutputWithContext added in v0.32.0

func (o StartupConfigPtrOutput) ToStartupConfigPtrOutputWithContext(ctx context.Context) StartupConfigPtrOutput

type StartupConfigResponse added in v0.32.0

type StartupConfigResponse struct {
	// Optional. The config setting that makes cluster creation/update succeed only after required_registration_fraction of instances are up and running. This configuration currently applies only to secondary workers. The cluster will fail if required_registration_fraction of instances are not available. This includes instance creation, agent registration, and service registration (if enabled).
	RequiredRegistrationFraction float64 `pulumi:"requiredRegistrationFraction"`
}

Configuration to handle the startup of instances during cluster create and update process.

type StartupConfigResponseOutput added in v0.32.0

type StartupConfigResponseOutput struct{ *pulumi.OutputState }

Configuration to handle the startup of instances during cluster create and update process.

func (StartupConfigResponseOutput) ElementType added in v0.32.0

func (StartupConfigResponseOutput) RequiredRegistrationFraction added in v0.32.0

func (o StartupConfigResponseOutput) RequiredRegistrationFraction() pulumi.Float64Output

Optional. The config setting that makes cluster creation/update succeed only after required_registration_fraction of instances are up and running. This configuration currently applies only to secondary workers. The cluster will fail if required_registration_fraction of instances are not available. This includes instance creation, agent registration, and service registration (if enabled).

func (StartupConfigResponseOutput) ToStartupConfigResponseOutput added in v0.32.0

func (o StartupConfigResponseOutput) ToStartupConfigResponseOutput() StartupConfigResponseOutput

func (StartupConfigResponseOutput) ToStartupConfigResponseOutputWithContext added in v0.32.0

func (o StartupConfigResponseOutput) ToStartupConfigResponseOutputWithContext(ctx context.Context) StartupConfigResponseOutput

type StateHistoryResponse added in v0.12.0

type StateHistoryResponse struct {
	// The state of the batch at this point in history.
	State string `pulumi:"state"`
	// Details about the state at this point in history.
	StateMessage string `pulumi:"stateMessage"`
	// The time when the batch entered the historical state.
	StateStartTime string `pulumi:"stateStartTime"`
}

Historical state information.

type StateHistoryResponseArrayOutput added in v0.12.0

type StateHistoryResponseArrayOutput struct{ *pulumi.OutputState }

func (StateHistoryResponseArrayOutput) ElementType added in v0.12.0

func (StateHistoryResponseArrayOutput) Index added in v0.12.0

func (StateHistoryResponseArrayOutput) ToStateHistoryResponseArrayOutput added in v0.12.0

func (o StateHistoryResponseArrayOutput) ToStateHistoryResponseArrayOutput() StateHistoryResponseArrayOutput

func (StateHistoryResponseArrayOutput) ToStateHistoryResponseArrayOutputWithContext added in v0.12.0

func (o StateHistoryResponseArrayOutput) ToStateHistoryResponseArrayOutputWithContext(ctx context.Context) StateHistoryResponseArrayOutput

type StateHistoryResponseOutput added in v0.12.0

type StateHistoryResponseOutput struct{ *pulumi.OutputState }

Historical state information.

func (StateHistoryResponseOutput) ElementType added in v0.12.0

func (StateHistoryResponseOutput) ElementType() reflect.Type

func (StateHistoryResponseOutput) State added in v0.12.0

The state of the batch at this point in history.

func (StateHistoryResponseOutput) StateMessage added in v0.12.0

Details about the state at this point in history.

func (StateHistoryResponseOutput) StateStartTime added in v0.12.0

func (o StateHistoryResponseOutput) StateStartTime() pulumi.StringOutput

The time when the batch entered the historical state.

func (StateHistoryResponseOutput) ToStateHistoryResponseOutput added in v0.12.0

func (o StateHistoryResponseOutput) ToStateHistoryResponseOutput() StateHistoryResponseOutput

func (StateHistoryResponseOutput) ToStateHistoryResponseOutputWithContext added in v0.12.0

func (o StateHistoryResponseOutput) ToStateHistoryResponseOutputWithContext(ctx context.Context) StateHistoryResponseOutput

type TemplateParameter

type TemplateParameter struct {
	// Optional. Brief description of the parameter. Must not exceed 1024 characters.
	Description *string `pulumi:"description"`
	// Paths to all fields that the parameter replaces. A field is allowed to appear in at most one parameter's list of field paths. A field path is similar in syntax to a google.protobuf.FieldMask. For example, a field path that references the zone field of a workflow template's cluster selector would be specified as placement.clusterSelector.zone. Also, field paths can reference fields using the following syntax: Values in maps can be referenced by key: labels['key'] placement.clusterSelector.clusterLabels['key'] placement.managedCluster.labels['key'] placement.clusterSelector.clusterLabels['key'] jobs['step-id'].labels['key'] Jobs in the jobs list can be referenced by step-id: jobs['step-id'].hadoopJob.mainJarFileUri jobs['step-id'].hiveJob.queryFileUri jobs['step-id'].pySparkJob.mainPythonFileUri jobs['step-id'].hadoopJob.jarFileUris[0] jobs['step-id'].hadoopJob.archiveUris[0] jobs['step-id'].hadoopJob.fileUris[0] jobs['step-id'].pySparkJob.pythonFileUris[0] Items in repeated fields can be referenced by a zero-based index: jobs['step-id'].sparkJob.args[0] Other examples: jobs['step-id'].hadoopJob.properties['key'] jobs['step-id'].hadoopJob.args[0] jobs['step-id'].hiveJob.scriptVariables['key'] jobs['step-id'].hadoopJob.mainJarFileUri placement.clusterSelector.zone It may not be possible to parameterize maps and repeated fields in their entirety since only individual map values and individual items in repeated fields can be referenced. For example, the following field paths are invalid: placement.clusterSelector.clusterLabels jobs['step-id'].sparkJob.args
	Fields []string `pulumi:"fields"`
	// Parameter name. The parameter name is used as the key, and paired with the parameter value, which are passed to the template when the template is instantiated. The name must contain only capital letters (A-Z), numbers (0-9), and underscores (_), and must not start with a number. The maximum length is 40 characters.
	Name string `pulumi:"name"`
	// Optional. Validation rules to be applied to this parameter's value.
	Validation *ParameterValidation `pulumi:"validation"`
}

A configurable parameter that replaces one or more fields in the template. Parameterizable fields: - Labels - File uris - Job properties - Job arguments - Script variables - Main class (in HadoopJob and SparkJob) - Zone (in ClusterSelector)

type TemplateParameterArgs

type TemplateParameterArgs struct {
	// Optional. Brief description of the parameter. Must not exceed 1024 characters.
	Description pulumi.StringPtrInput `pulumi:"description"`
	// Paths to all fields that the parameter replaces. A field is allowed to appear in at most one parameter's list of field paths. A field path is similar in syntax to a google.protobuf.FieldMask. For example, a field path that references the zone field of a workflow template's cluster selector would be specified as placement.clusterSelector.zone. Also, field paths can reference fields using the following syntax: Values in maps can be referenced by key: labels['key'] placement.clusterSelector.clusterLabels['key'] placement.managedCluster.labels['key'] placement.clusterSelector.clusterLabels['key'] jobs['step-id'].labels['key'] Jobs in the jobs list can be referenced by step-id: jobs['step-id'].hadoopJob.mainJarFileUri jobs['step-id'].hiveJob.queryFileUri jobs['step-id'].pySparkJob.mainPythonFileUri jobs['step-id'].hadoopJob.jarFileUris[0] jobs['step-id'].hadoopJob.archiveUris[0] jobs['step-id'].hadoopJob.fileUris[0] jobs['step-id'].pySparkJob.pythonFileUris[0] Items in repeated fields can be referenced by a zero-based index: jobs['step-id'].sparkJob.args[0] Other examples: jobs['step-id'].hadoopJob.properties['key'] jobs['step-id'].hadoopJob.args[0] jobs['step-id'].hiveJob.scriptVariables['key'] jobs['step-id'].hadoopJob.mainJarFileUri placement.clusterSelector.zone It may not be possible to parameterize maps and repeated fields in their entirety since only individual map values and individual items in repeated fields can be referenced. For example, the following field paths are invalid: placement.clusterSelector.clusterLabels jobs['step-id'].sparkJob.args
	Fields pulumi.StringArrayInput `pulumi:"fields"`
	// Parameter name. The parameter name is used as the key, and paired with the parameter value, which are passed to the template when the template is instantiated. The name must contain only capital letters (A-Z), numbers (0-9), and underscores (_), and must not start with a number. The maximum length is 40 characters.
	Name pulumi.StringInput `pulumi:"name"`
	// Optional. Validation rules to be applied to this parameter's value.
	Validation ParameterValidationPtrInput `pulumi:"validation"`
}

A configurable parameter that replaces one or more fields in the template. Parameterizable fields: - Labels - File uris - Job properties - Job arguments - Script variables - Main class (in HadoopJob and SparkJob) - Zone (in ClusterSelector)
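
A minimal sketch of a parameter that substitutes the cluster selector zone, collected into the TemplateParameterArray documented below; the import path, parameter name, and description are illustrative:

package example

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// templateParameters holds a single parameter whose value replaces the
// placement.clusterSelector.zone field path when the template is instantiated.
var templateParameters = dataproc.TemplateParameterArray{
	dataproc.TemplateParameterArgs{
		Name:        pulumi.String("ZONE"),
		Description: pulumi.StringPtr("Compute Engine zone for the managed cluster."),
		Fields: pulumi.StringArray{
			pulumi.String("placement.clusterSelector.zone"),
		},
	},
}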

func (TemplateParameterArgs) ElementType

func (TemplateParameterArgs) ElementType() reflect.Type

func (TemplateParameterArgs) ToTemplateParameterOutput

func (i TemplateParameterArgs) ToTemplateParameterOutput() TemplateParameterOutput

func (TemplateParameterArgs) ToTemplateParameterOutputWithContext

func (i TemplateParameterArgs) ToTemplateParameterOutputWithContext(ctx context.Context) TemplateParameterOutput

type TemplateParameterArray

type TemplateParameterArray []TemplateParameterInput

func (TemplateParameterArray) ElementType

func (TemplateParameterArray) ElementType() reflect.Type

func (TemplateParameterArray) ToTemplateParameterArrayOutput

func (i TemplateParameterArray) ToTemplateParameterArrayOutput() TemplateParameterArrayOutput

func (TemplateParameterArray) ToTemplateParameterArrayOutputWithContext

func (i TemplateParameterArray) ToTemplateParameterArrayOutputWithContext(ctx context.Context) TemplateParameterArrayOutput

type TemplateParameterArrayInput

type TemplateParameterArrayInput interface {
	pulumi.Input

	ToTemplateParameterArrayOutput() TemplateParameterArrayOutput
	ToTemplateParameterArrayOutputWithContext(context.Context) TemplateParameterArrayOutput
}

TemplateParameterArrayInput is an input type that accepts TemplateParameterArray and TemplateParameterArrayOutput values. You can construct a concrete instance of `TemplateParameterArrayInput` via:

TemplateParameterArray{ TemplateParameterArgs{...} }

type TemplateParameterArrayOutput

type TemplateParameterArrayOutput struct{ *pulumi.OutputState }

func (TemplateParameterArrayOutput) ElementType

func (TemplateParameterArrayOutput) Index

func (TemplateParameterArrayOutput) ToTemplateParameterArrayOutput

func (o TemplateParameterArrayOutput) ToTemplateParameterArrayOutput() TemplateParameterArrayOutput

func (TemplateParameterArrayOutput) ToTemplateParameterArrayOutputWithContext

func (o TemplateParameterArrayOutput) ToTemplateParameterArrayOutputWithContext(ctx context.Context) TemplateParameterArrayOutput

type TemplateParameterInput

type TemplateParameterInput interface {
	pulumi.Input

	ToTemplateParameterOutput() TemplateParameterOutput
	ToTemplateParameterOutputWithContext(context.Context) TemplateParameterOutput
}

TemplateParameterInput is an input type that accepts TemplateParameterArgs and TemplateParameterOutput values. You can construct a concrete instance of `TemplateParameterInput` via:

TemplateParameterArgs{...}

type TemplateParameterOutput

type TemplateParameterOutput struct{ *pulumi.OutputState }

A configurable parameter that replaces one or more fields in the template. Parameterizable fields: - Labels - File uris - Job properties - Job arguments - Script variables - Main class (in HadoopJob and SparkJob) - Zone (in ClusterSelector)

func (TemplateParameterOutput) Description

Optional. Brief description of the parameter. Must not exceed 1024 characters.

func (TemplateParameterOutput) ElementType

func (TemplateParameterOutput) ElementType() reflect.Type

func (TemplateParameterOutput) Fields

Paths to all fields that the parameter replaces. A field is allowed to appear in at most one parameter's list of field paths. A field path is similar in syntax to a google.protobuf.FieldMask. For example, a field path that references the zone field of a workflow template's cluster selector would be specified as placement.clusterSelector.zone. Also, field paths can reference fields using the following syntax: Values in maps can be referenced by key: labels['key'] placement.clusterSelector.clusterLabels['key'] placement.managedCluster.labels['key'] placement.clusterSelector.clusterLabels['key'] jobs['step-id'].labels['key'] Jobs in the jobs list can be referenced by step-id: jobs['step-id'].hadoopJob.mainJarFileUri jobs['step-id'].hiveJob.queryFileUri jobs['step-id'].pySparkJob.mainPythonFileUri jobs['step-id'].hadoopJob.jarFileUris[0] jobs['step-id'].hadoopJob.archiveUris[0] jobs['step-id'].hadoopJob.fileUris[0] jobs['step-id'].pySparkJob.pythonFileUris[0] Items in repeated fields can be referenced by a zero-based index: jobs['step-id'].sparkJob.args[0] Other examples: jobs['step-id'].hadoopJob.properties['key'] jobs['step-id'].hadoopJob.args[0] jobs['step-id'].hiveJob.scriptVariables['key'] jobs['step-id'].hadoopJob.mainJarFileUri placement.clusterSelector.zone. It may not be possible to parameterize maps and repeated fields in their entirety since only individual map values and individual items in repeated fields can be referenced. For example, the following field paths are invalid: placement.clusterSelector.clusterLabels jobs['step-id'].sparkJob.args

func (TemplateParameterOutput) Name

Parameter name. The parameter name is used as the key, and paired with the parameter value, which are passed to the template when the template is instantiated. The name must contain only capital letters (A-Z), numbers (0-9), and underscores (_), and must not start with a number. The maximum length is 40 characters.

func (TemplateParameterOutput) ToTemplateParameterOutput

func (o TemplateParameterOutput) ToTemplateParameterOutput() TemplateParameterOutput

func (TemplateParameterOutput) ToTemplateParameterOutputWithContext

func (o TemplateParameterOutput) ToTemplateParameterOutputWithContext(ctx context.Context) TemplateParameterOutput

func (TemplateParameterOutput) Validation

Optional. Validation rules to be applied to this parameter's value.

type TemplateParameterResponse

type TemplateParameterResponse struct {
	// Optional. Brief description of the parameter. Must not exceed 1024 characters.
	Description string `pulumi:"description"`
	// Paths to all fields that the parameter replaces. A field is allowed to appear in at most one parameter's list of field paths. A field path is similar in syntax to a google.protobuf.FieldMask. For example, a field path that references the zone field of a workflow template's cluster selector would be specified as placement.clusterSelector.zone. Also, field paths can reference fields using the following syntax: Values in maps can be referenced by key: labels['key'] placement.clusterSelector.clusterLabels['key'] placement.managedCluster.labels['key'] placement.clusterSelector.clusterLabels['key'] jobs['step-id'].labels['key'] Jobs in the jobs list can be referenced by step-id: jobs['step-id'].hadoopJob.mainJarFileUri jobs['step-id'].hiveJob.queryFileUri jobs['step-id'].pySparkJob.mainPythonFileUri jobs['step-id'].hadoopJob.jarFileUris[0] jobs['step-id'].hadoopJob.archiveUris[0] jobs['step-id'].hadoopJob.fileUris[0] jobs['step-id'].pySparkJob.pythonFileUris[0] Items in repeated fields can be referenced by a zero-based index: jobs['step-id'].sparkJob.args[0] Other examples: jobs['step-id'].hadoopJob.properties['key'] jobs['step-id'].hadoopJob.args[0] jobs['step-id'].hiveJob.scriptVariables['key'] jobs['step-id'].hadoopJob.mainJarFileUri placement.clusterSelector.zone. It may not be possible to parameterize maps and repeated fields in their entirety since only individual map values and individual items in repeated fields can be referenced. For example, the following field paths are invalid: placement.clusterSelector.clusterLabels jobs['step-id'].sparkJob.args
	Fields []string `pulumi:"fields"`
	// Parameter name. The parameter name is used as the key, and paired with the parameter value, which are passed to the template when the template is instantiated. The name must contain only capital letters (A-Z), numbers (0-9), and underscores (_), and must not start with a number. The maximum length is 40 characters.
	Name string `pulumi:"name"`
	// Optional. Validation rules to be applied to this parameter's value.
	Validation ParameterValidationResponse `pulumi:"validation"`
}

A configurable parameter that replaces one or more fields in the template. Parameterizable fields: - Labels - File uris - Job properties - Job arguments - Script variables - Main class (in HadoopJob and SparkJob) - Zone (in ClusterSelector)

type TemplateParameterResponseArrayOutput

type TemplateParameterResponseArrayOutput struct{ *pulumi.OutputState }

func (TemplateParameterResponseArrayOutput) ElementType

func (TemplateParameterResponseArrayOutput) Index

func (TemplateParameterResponseArrayOutput) ToTemplateParameterResponseArrayOutput

func (o TemplateParameterResponseArrayOutput) ToTemplateParameterResponseArrayOutput() TemplateParameterResponseArrayOutput

func (TemplateParameterResponseArrayOutput) ToTemplateParameterResponseArrayOutputWithContext

func (o TemplateParameterResponseArrayOutput) ToTemplateParameterResponseArrayOutputWithContext(ctx context.Context) TemplateParameterResponseArrayOutput

type TemplateParameterResponseOutput

type TemplateParameterResponseOutput struct{ *pulumi.OutputState }

A configurable parameter that replaces one or more fields in the template. Parameterizable fields: - Labels - File uris - Job properties - Job arguments - Script variables - Main class (in HadoopJob and SparkJob) - Zone (in ClusterSelector)

func (TemplateParameterResponseOutput) Description

Optional. Brief description of the parameter. Must not exceed 1024 characters.

func (TemplateParameterResponseOutput) ElementType

func (TemplateParameterResponseOutput) Fields

Paths to all fields that the parameter replaces. A field is allowed to appear in at most one parameter's list of field paths. A field path is similar in syntax to a google.protobuf.FieldMask. For example, a field path that references the zone field of a workflow template's cluster selector would be specified as placement.clusterSelector.zone. Also, field paths can reference fields using the following syntax: Values in maps can be referenced by key: labels['key'] placement.clusterSelector.clusterLabels['key'] placement.managedCluster.labels['key'] placement.clusterSelector.clusterLabels['key'] jobs['step-id'].labels['key'] Jobs in the jobs list can be referenced by step-id: jobs['step-id'].hadoopJob.mainJarFileUri jobs['step-id'].hiveJob.queryFileUri jobs['step-id'].pySparkJob.mainPythonFileUri jobs['step-id'].hadoopJob.jarFileUris[0] jobs['step-id'].hadoopJob.archiveUris[0] jobs['step-id'].hadoopJob.fileUris[0] jobs['step-id'].pySparkJob.pythonFileUris[0] Items in repeated fields can be referenced by a zero-based index: jobs['step-id'].sparkJob.args[0] Other examples: jobs['step-id'].hadoopJob.properties['key'] jobs['step-id'].hadoopJob.args[0] jobs['step-id'].hiveJob.scriptVariables['key'] jobs['step-id'].hadoopJob.mainJarFileUri placement.clusterSelector.zone. It may not be possible to parameterize maps and repeated fields in their entirety since only individual map values and individual items in repeated fields can be referenced. For example, the following field paths are invalid: placement.clusterSelector.clusterLabels jobs['step-id'].sparkJob.args

func (TemplateParameterResponseOutput) Name

Parameter name. The parameter name is used as the key, and paired with the parameter value, which are passed to the template when the template is instantiated. The name must contain only capital letters (A-Z), numbers (0-9), and underscores (_), and must not start with a number. The maximum length is 40 characters.

func (TemplateParameterResponseOutput) ToTemplateParameterResponseOutput

func (o TemplateParameterResponseOutput) ToTemplateParameterResponseOutput() TemplateParameterResponseOutput

func (TemplateParameterResponseOutput) ToTemplateParameterResponseOutputWithContext

func (o TemplateParameterResponseOutput) ToTemplateParameterResponseOutputWithContext(ctx context.Context) TemplateParameterResponseOutput

func (TemplateParameterResponseOutput) Validation

Optional. Validation rules to be applied to this parameter's value.

type TrinoJob added in v0.26.0

type TrinoJob struct {
	// Optional. Trino client tags to attach to this query
	ClientTags []string `pulumi:"clientTags"`
	// Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
	ContinueOnFailure *bool `pulumi:"continueOnFailure"`
	// Optional. The runtime log config for job execution.
	LoggingConfig *LoggingConfig `pulumi:"loggingConfig"`
	// Optional. The format in which query output will be displayed. See the Trino documentation for supported output formats
	OutputFormat *string `pulumi:"outputFormat"`
	// Optional. A mapping of property names to values. Used to set Trino session properties (https://trino.io/docs/current/sql/set-session.html). Equivalent to using the --session flag in the Trino CLI.
	Properties map[string]string `pulumi:"properties"`
	// The HCFS URI of the script that contains SQL queries.
	QueryFileUri *string `pulumi:"queryFileUri"`
	// A list of queries.
	QueryList *QueryList `pulumi:"queryList"`
}

A Dataproc job for running Trino (https://trino.io/) queries. IMPORTANT: The Dataproc Trino Optional Component (https://cloud.google.com/dataproc/docs/concepts/components/trino) must be enabled when the cluster is created to submit a Trino job to the cluster.

type TrinoJobArgs added in v0.26.0

type TrinoJobArgs struct {
	// Optional. Trino client tags to attach to this query
	ClientTags pulumi.StringArrayInput `pulumi:"clientTags"`
	// Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
	ContinueOnFailure pulumi.BoolPtrInput `pulumi:"continueOnFailure"`
	// Optional. The runtime log config for job execution.
	LoggingConfig LoggingConfigPtrInput `pulumi:"loggingConfig"`
	// Optional. The format in which query output will be displayed. See the Trino documentation for supported output formats
	OutputFormat pulumi.StringPtrInput `pulumi:"outputFormat"`
	// Optional. A mapping of property names to values. Used to set Trino session properties (https://trino.io/docs/current/sql/set-session.html). Equivalent to using the --session flag in the Trino CLI.
	Properties pulumi.StringMapInput `pulumi:"properties"`
	// The HCFS URI of the script that contains SQL queries.
	QueryFileUri pulumi.StringPtrInput `pulumi:"queryFileUri"`
	// A list of queries.
	QueryList QueryListPtrInput `pulumi:"queryList"`
}

A Dataproc job for running Trino (https://trino.io/) queries. IMPORTANT: The Dataproc Trino Optional Component (https://cloud.google.com/dataproc/docs/concepts/components/trino) must be enabled when the cluster is created to submit a Trino job to the cluster.
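As a hedged sketch of how these fields are typically filled in (assuming the same dataproc and pulumi imports as in the parameter example above, and assuming QueryListArgs exposes a Queries string array mirroring the underlying QueryList message), a Trino job value might look like:

var trinoJob = dataproc.TrinoJobArgs{
	// Inline queries; QueryFileUri could be used instead to point at a script in Cloud Storage.
	QueryList: dataproc.QueryListArgs{
		Queries: pulumi.StringArray{
			pulumi.String("SELECT 1"),
		},
	},
	// Keep executing the remaining (independent) queries if one fails.
	ContinueOnFailure: pulumi.Bool(true),
	// Session properties, equivalent to --session in the Trino CLI; the property value is illustrative.
	Properties: pulumi.StringMap{
		"query_max_run_time": pulumi.String("30m"),
	},
	ClientTags: pulumi.StringArray{
		pulumi.String("pulumi-example"),
	},
}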

func (TrinoJobArgs) ElementType added in v0.26.0

func (TrinoJobArgs) ElementType() reflect.Type

func (TrinoJobArgs) ToTrinoJobOutput added in v0.26.0

func (i TrinoJobArgs) ToTrinoJobOutput() TrinoJobOutput

func (TrinoJobArgs) ToTrinoJobOutputWithContext added in v0.26.0

func (i TrinoJobArgs) ToTrinoJobOutputWithContext(ctx context.Context) TrinoJobOutput

func (TrinoJobArgs) ToTrinoJobPtrOutput added in v0.26.0

func (i TrinoJobArgs) ToTrinoJobPtrOutput() TrinoJobPtrOutput

func (TrinoJobArgs) ToTrinoJobPtrOutputWithContext added in v0.26.0

func (i TrinoJobArgs) ToTrinoJobPtrOutputWithContext(ctx context.Context) TrinoJobPtrOutput

type TrinoJobInput added in v0.26.0

type TrinoJobInput interface {
	pulumi.Input

	ToTrinoJobOutput() TrinoJobOutput
	ToTrinoJobOutputWithContext(context.Context) TrinoJobOutput
}

TrinoJobInput is an input type that accepts TrinoJobArgs and TrinoJobOutput values. You can construct a concrete instance of `TrinoJobInput` via:

TrinoJobArgs{...}

type TrinoJobOutput added in v0.26.0

type TrinoJobOutput struct{ *pulumi.OutputState }

A Dataproc job for running Trino (https://trino.io/) queries. IMPORTANT: The Dataproc Trino Optional Component (https://cloud.google.com/dataproc/docs/concepts/components/trino) must be enabled when the cluster is created to submit a Trino job to the cluster.

func (TrinoJobOutput) ClientTags added in v0.26.0

func (o TrinoJobOutput) ClientTags() pulumi.StringArrayOutput

Optional. Trino client tags to attach to this query

func (TrinoJobOutput) ContinueOnFailure added in v0.26.0

func (o TrinoJobOutput) ContinueOnFailure() pulumi.BoolPtrOutput

Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.

func (TrinoJobOutput) ElementType added in v0.26.0

func (TrinoJobOutput) ElementType() reflect.Type

func (TrinoJobOutput) LoggingConfig added in v0.26.0

func (o TrinoJobOutput) LoggingConfig() LoggingConfigPtrOutput

Optional. The runtime log config for job execution.

func (TrinoJobOutput) OutputFormat added in v0.26.0

func (o TrinoJobOutput) OutputFormat() pulumi.StringPtrOutput

Optional. The format in which query output will be displayed. See the Trino documentation for supported output formats

func (TrinoJobOutput) Properties added in v0.26.0

func (o TrinoJobOutput) Properties() pulumi.StringMapOutput

Optional. A mapping of property names to values. Used to set Trino session properties (https://trino.io/docs/current/sql/set-session.html). Equivalent to using the --session flag in the Trino CLI.

func (TrinoJobOutput) QueryFileUri added in v0.26.0

func (o TrinoJobOutput) QueryFileUri() pulumi.StringPtrOutput

The HCFS URI of the script that contains SQL queries.

func (TrinoJobOutput) QueryList added in v0.26.0

func (o TrinoJobOutput) QueryList() QueryListPtrOutput

A list of queries.

func (TrinoJobOutput) ToTrinoJobOutput added in v0.26.0

func (o TrinoJobOutput) ToTrinoJobOutput() TrinoJobOutput

func (TrinoJobOutput) ToTrinoJobOutputWithContext added in v0.26.0

func (o TrinoJobOutput) ToTrinoJobOutputWithContext(ctx context.Context) TrinoJobOutput

func (TrinoJobOutput) ToTrinoJobPtrOutput added in v0.26.0

func (o TrinoJobOutput) ToTrinoJobPtrOutput() TrinoJobPtrOutput

func (TrinoJobOutput) ToTrinoJobPtrOutputWithContext added in v0.26.0

func (o TrinoJobOutput) ToTrinoJobPtrOutputWithContext(ctx context.Context) TrinoJobPtrOutput

type TrinoJobPtrInput added in v0.26.0

type TrinoJobPtrInput interface {
	pulumi.Input

	ToTrinoJobPtrOutput() TrinoJobPtrOutput
	ToTrinoJobPtrOutputWithContext(context.Context) TrinoJobPtrOutput
}

TrinoJobPtrInput is an input type that accepts TrinoJobArgs, TrinoJobPtr and TrinoJobPtrOutput values. You can construct a concrete instance of `TrinoJobPtrInput` via:

        TrinoJobArgs{...}

or:

        nil

func TrinoJobPtr added in v0.26.0

func TrinoJobPtr(v *TrinoJobArgs) TrinoJobPtrInput

type TrinoJobPtrOutput added in v0.26.0

type TrinoJobPtrOutput struct{ *pulumi.OutputState }

func (TrinoJobPtrOutput) ClientTags added in v0.26.0

Optional. Trino client tags to attach to this query

func (TrinoJobPtrOutput) ContinueOnFailure added in v0.26.0

func (o TrinoJobPtrOutput) ContinueOnFailure() pulumi.BoolPtrOutput

Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.

func (TrinoJobPtrOutput) Elem added in v0.26.0

func (TrinoJobPtrOutput) ElementType added in v0.26.0

func (TrinoJobPtrOutput) ElementType() reflect.Type

func (TrinoJobPtrOutput) LoggingConfig added in v0.26.0

func (o TrinoJobPtrOutput) LoggingConfig() LoggingConfigPtrOutput

Optional. The runtime log config for job execution.

func (TrinoJobPtrOutput) OutputFormat added in v0.26.0

func (o TrinoJobPtrOutput) OutputFormat() pulumi.StringPtrOutput

Optional. The format in which query output will be displayed. See the Trino documentation for supported output formats

func (TrinoJobPtrOutput) Properties added in v0.26.0

func (o TrinoJobPtrOutput) Properties() pulumi.StringMapOutput

Optional. A mapping of property names to values. Used to set Trino session properties (https://trino.io/docs/current/sql/set-session.html). Equivalent to using the --session flag in the Trino CLI.

func (TrinoJobPtrOutput) QueryFileUri added in v0.26.0

func (o TrinoJobPtrOutput) QueryFileUri() pulumi.StringPtrOutput

The HCFS URI of the script that contains SQL queries.

func (TrinoJobPtrOutput) QueryList added in v0.26.0

func (o TrinoJobPtrOutput) QueryList() QueryListPtrOutput

A list of queries.

func (TrinoJobPtrOutput) ToTrinoJobPtrOutput added in v0.26.0

func (o TrinoJobPtrOutput) ToTrinoJobPtrOutput() TrinoJobPtrOutput

func (TrinoJobPtrOutput) ToTrinoJobPtrOutputWithContext added in v0.26.0

func (o TrinoJobPtrOutput) ToTrinoJobPtrOutputWithContext(ctx context.Context) TrinoJobPtrOutput

type TrinoJobResponse added in v0.26.0

type TrinoJobResponse struct {
	// Optional. Trino client tags to attach to this query
	ClientTags []string `pulumi:"clientTags"`
	// Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
	ContinueOnFailure bool `pulumi:"continueOnFailure"`
	// Optional. The runtime log config for job execution.
	LoggingConfig LoggingConfigResponse `pulumi:"loggingConfig"`
	// Optional. The format in which query output will be displayed. See the Trino documentation for supported output formats
	OutputFormat string `pulumi:"outputFormat"`
	// Optional. A mapping of property names to values. Used to set Trino session properties (https://trino.io/docs/current/sql/set-session.html). Equivalent to using the --session flag in the Trino CLI.
	Properties map[string]string `pulumi:"properties"`
	// The HCFS URI of the script that contains SQL queries.
	QueryFileUri string `pulumi:"queryFileUri"`
	// A list of queries.
	QueryList QueryListResponse `pulumi:"queryList"`
}

A Dataproc job for running Trino (https://trino.io/) queries. IMPORTANT: The Dataproc Trino Optional Component (https://cloud.google.com/dataproc/docs/concepts/components/trino) must be enabled when the cluster is created to submit a Trino job to the cluster.

type TrinoJobResponseOutput added in v0.26.0

type TrinoJobResponseOutput struct{ *pulumi.OutputState }

A Dataproc job for running Trino (https://trino.io/) queries. IMPORTANT: The Dataproc Trino Optional Component (https://cloud.google.com/dataproc/docs/concepts/components/trino) must be enabled when the cluster is created to submit a Trino job to the cluster.

func (TrinoJobResponseOutput) ClientTags added in v0.26.0

Optional. Trino client tags to attach to this query

func (TrinoJobResponseOutput) ContinueOnFailure added in v0.26.0

func (o TrinoJobResponseOutput) ContinueOnFailure() pulumi.BoolOutput

Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.

func (TrinoJobResponseOutput) ElementType added in v0.26.0

func (TrinoJobResponseOutput) ElementType() reflect.Type

func (TrinoJobResponseOutput) LoggingConfig added in v0.26.0

Optional. The runtime log config for job execution.

func (TrinoJobResponseOutput) OutputFormat added in v0.26.0

func (o TrinoJobResponseOutput) OutputFormat() pulumi.StringOutput

Optional. The format in which query output will be displayed. See the Trino documentation for supported output formats

func (TrinoJobResponseOutput) Properties added in v0.26.0

Optional. A mapping of property names to values. Used to set Trino session properties (https://trino.io/docs/current/sql/set-session.html). Equivalent to using the --session flag in the Trino CLI.

func (TrinoJobResponseOutput) QueryFileUri added in v0.26.0

func (o TrinoJobResponseOutput) QueryFileUri() pulumi.StringOutput

The HCFS URI of the script that contains SQL queries.

func (TrinoJobResponseOutput) QueryList added in v0.26.0

A list of queries.

func (TrinoJobResponseOutput) ToTrinoJobResponseOutput added in v0.26.0

func (o TrinoJobResponseOutput) ToTrinoJobResponseOutput() TrinoJobResponseOutput

func (TrinoJobResponseOutput) ToTrinoJobResponseOutputWithContext added in v0.26.0

func (o TrinoJobResponseOutput) ToTrinoJobResponseOutputWithContext(ctx context.Context) TrinoJobResponseOutput

type UsageMetricsResponse added in v0.28.0

type UsageMetricsResponse struct {
	// Optional. Accelerator type being used, if any
	AcceleratorType string `pulumi:"acceleratorType"`
	// Optional. Accelerator usage in (milliAccelerator x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)).
	MilliAcceleratorSeconds string `pulumi:"milliAcceleratorSeconds"`
	// Optional. DCU (Dataproc Compute Units) usage in (milliDCU x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)).
	MilliDcuSeconds string `pulumi:"milliDcuSeconds"`
	// Optional. Shuffle storage usage in (GB x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)).
	ShuffleStorageGbSeconds string `pulumi:"shuffleStorageGbSeconds"`
}

Usage metrics represent approximate total resources consumed by a workload.

type UsageMetricsResponseOutput added in v0.28.0

type UsageMetricsResponseOutput struct{ *pulumi.OutputState }

Usage metrics represent approximate total resources consumed by a workload.

func (UsageMetricsResponseOutput) AcceleratorType added in v0.32.0

func (o UsageMetricsResponseOutput) AcceleratorType() pulumi.StringOutput

Optional. Accelerator type being used, if any

func (UsageMetricsResponseOutput) ElementType added in v0.28.0

func (UsageMetricsResponseOutput) ElementType() reflect.Type

func (UsageMetricsResponseOutput) MilliAcceleratorSeconds added in v0.32.0

func (o UsageMetricsResponseOutput) MilliAcceleratorSeconds() pulumi.StringOutput

Optional. Accelerator usage in (milliAccelerator x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)).

func (UsageMetricsResponseOutput) MilliDcuSeconds added in v0.28.0

func (o UsageMetricsResponseOutput) MilliDcuSeconds() pulumi.StringOutput

Optional. DCU (Dataproc Compute Units) usage in (milliDCU x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)).

func (UsageMetricsResponseOutput) ShuffleStorageGbSeconds added in v0.28.0

func (o UsageMetricsResponseOutput) ShuffleStorageGbSeconds() pulumi.StringOutput

Optional. Shuffle storage usage in (GB x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)).

func (UsageMetricsResponseOutput) ToUsageMetricsResponseOutput added in v0.28.0

func (o UsageMetricsResponseOutput) ToUsageMetricsResponseOutput() UsageMetricsResponseOutput

func (UsageMetricsResponseOutput) ToUsageMetricsResponseOutputWithContext added in v0.28.0

func (o UsageMetricsResponseOutput) ToUsageMetricsResponseOutputWithContext(ctx context.Context) UsageMetricsResponseOutput

type UsageSnapshotResponse added in v0.28.0

type UsageSnapshotResponse struct {
	// Optional. Accelerator type being used, if any
	AcceleratorType string `pulumi:"acceleratorType"`
	// Optional. Milli (one-thousandth) accelerator. (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing))
	MilliAccelerator string `pulumi:"milliAccelerator"`
	// Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)).
	MilliDcu string `pulumi:"milliDcu"`
	// Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) charged at premium tier (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)).
	MilliDcuPremium string `pulumi:"milliDcuPremium"`
	// Optional. Shuffle Storage in gigabytes (GB). (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing))
	ShuffleStorageGb string `pulumi:"shuffleStorageGb"`
	// Optional. Shuffle Storage in gigabytes (GB) charged at premium tier. (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing))
	ShuffleStorageGbPremium string `pulumi:"shuffleStorageGbPremium"`
	// Optional. The timestamp of the usage snapshot.
	SnapshotTime string `pulumi:"snapshotTime"`
}

The usage snapshot represents the resources consumed by a workload at a specified time.

type UsageSnapshotResponseOutput added in v0.28.0

type UsageSnapshotResponseOutput struct{ *pulumi.OutputState }

The usage snapshot represents the resources consumed by a workload at a specified time.

func (UsageSnapshotResponseOutput) AcceleratorType added in v0.32.0

func (o UsageSnapshotResponseOutput) AcceleratorType() pulumi.StringOutput

Optional. Accelerator type being used, if any

func (UsageSnapshotResponseOutput) ElementType added in v0.28.0

func (UsageSnapshotResponseOutput) MilliAccelerator added in v0.32.0

func (o UsageSnapshotResponseOutput) MilliAccelerator() pulumi.StringOutput

Optional. Milli (one-thousandth) accelerator. (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing))

func (UsageSnapshotResponseOutput) MilliDcu added in v0.28.0

Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)).

func (UsageSnapshotResponseOutput) MilliDcuPremium added in v0.32.0

func (o UsageSnapshotResponseOutput) MilliDcuPremium() pulumi.StringOutput

Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) charged at premium tier (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)).

func (UsageSnapshotResponseOutput) ShuffleStorageGb added in v0.28.0

func (o UsageSnapshotResponseOutput) ShuffleStorageGb() pulumi.StringOutput

Optional. Shuffle Storage in gigabytes (GB). (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing))

func (UsageSnapshotResponseOutput) ShuffleStorageGbPremium added in v0.32.0

func (o UsageSnapshotResponseOutput) ShuffleStorageGbPremium() pulumi.StringOutput

Optional. Shuffle Storage in gigabytes (GB) charged at premium tier. (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing))

func (UsageSnapshotResponseOutput) SnapshotTime added in v0.28.0

Optional. The timestamp of the usage snapshot.

func (UsageSnapshotResponseOutput) ToUsageSnapshotResponseOutput added in v0.28.0

func (o UsageSnapshotResponseOutput) ToUsageSnapshotResponseOutput() UsageSnapshotResponseOutput

func (UsageSnapshotResponseOutput) ToUsageSnapshotResponseOutputWithContext added in v0.28.0

func (o UsageSnapshotResponseOutput) ToUsageSnapshotResponseOutputWithContext(ctx context.Context) UsageSnapshotResponseOutput

type ValueValidation

type ValueValidation struct {
	// List of allowed values for the parameter.
	Values []string `pulumi:"values"`
}

Validation based on a list of allowed values.

type ValueValidationArgs

type ValueValidationArgs struct {
	// List of allowed values for the parameter.
	Values pulumi.StringArrayInput `pulumi:"values"`
}

Validation based on a list of allowed values.
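A minimal sketch of wiring this into a parameter's validation, assuming the same imports as above and assuming ParameterValidationArgs exposes a Values field mirroring the underlying ParameterValidation message (the zone values are placeholders):

var zoneValidation = dataproc.ParameterValidationArgs{
	// Restrict the parameter to an explicit list of allowed values.
	Values: dataproc.ValueValidationArgs{
		Values: pulumi.StringArray{
			pulumi.String("us-central1-a"),
			pulumi.String("us-central1-b"),
		},
	},
}

The value can then be assigned to the Validation field of a TemplateParameterArgs.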

func (ValueValidationArgs) ElementType

func (ValueValidationArgs) ElementType() reflect.Type

func (ValueValidationArgs) ToValueValidationOutput

func (i ValueValidationArgs) ToValueValidationOutput() ValueValidationOutput

func (ValueValidationArgs) ToValueValidationOutputWithContext

func (i ValueValidationArgs) ToValueValidationOutputWithContext(ctx context.Context) ValueValidationOutput

func (ValueValidationArgs) ToValueValidationPtrOutput

func (i ValueValidationArgs) ToValueValidationPtrOutput() ValueValidationPtrOutput

func (ValueValidationArgs) ToValueValidationPtrOutputWithContext

func (i ValueValidationArgs) ToValueValidationPtrOutputWithContext(ctx context.Context) ValueValidationPtrOutput

type ValueValidationInput

type ValueValidationInput interface {
	pulumi.Input

	ToValueValidationOutput() ValueValidationOutput
	ToValueValidationOutputWithContext(context.Context) ValueValidationOutput
}

ValueValidationInput is an input type that accepts ValueValidationArgs and ValueValidationOutput values. You can construct a concrete instance of `ValueValidationInput` via:

ValueValidationArgs{...}

type ValueValidationOutput

type ValueValidationOutput struct{ *pulumi.OutputState }

Validation based on a list of allowed values.

func (ValueValidationOutput) ElementType

func (ValueValidationOutput) ElementType() reflect.Type

func (ValueValidationOutput) ToValueValidationOutput

func (o ValueValidationOutput) ToValueValidationOutput() ValueValidationOutput

func (ValueValidationOutput) ToValueValidationOutputWithContext

func (o ValueValidationOutput) ToValueValidationOutputWithContext(ctx context.Context) ValueValidationOutput

func (ValueValidationOutput) ToValueValidationPtrOutput

func (o ValueValidationOutput) ToValueValidationPtrOutput() ValueValidationPtrOutput

func (ValueValidationOutput) ToValueValidationPtrOutputWithContext

func (o ValueValidationOutput) ToValueValidationPtrOutputWithContext(ctx context.Context) ValueValidationPtrOutput

func (ValueValidationOutput) Values

List of allowed values for the parameter.

type ValueValidationPtrInput

type ValueValidationPtrInput interface {
	pulumi.Input

	ToValueValidationPtrOutput() ValueValidationPtrOutput
	ToValueValidationPtrOutputWithContext(context.Context) ValueValidationPtrOutput
}

ValueValidationPtrInput is an input type that accepts ValueValidationArgs, ValueValidationPtr and ValueValidationPtrOutput values. You can construct a concrete instance of `ValueValidationPtrInput` via:

        ValueValidationArgs{...}

or:

        nil

type ValueValidationPtrOutput

type ValueValidationPtrOutput struct{ *pulumi.OutputState }

func (ValueValidationPtrOutput) Elem

func (ValueValidationPtrOutput) ElementType

func (ValueValidationPtrOutput) ElementType() reflect.Type

func (ValueValidationPtrOutput) ToValueValidationPtrOutput

func (o ValueValidationPtrOutput) ToValueValidationPtrOutput() ValueValidationPtrOutput

func (ValueValidationPtrOutput) ToValueValidationPtrOutputWithContext

func (o ValueValidationPtrOutput) ToValueValidationPtrOutputWithContext(ctx context.Context) ValueValidationPtrOutput

func (ValueValidationPtrOutput) Values

List of allowed values for the parameter.

type ValueValidationResponse

type ValueValidationResponse struct {
	// List of allowed values for the parameter.
	Values []string `pulumi:"values"`
}

Validation based on a list of allowed values.

type ValueValidationResponseOutput

type ValueValidationResponseOutput struct{ *pulumi.OutputState }

Validation based on a list of allowed values.

func (ValueValidationResponseOutput) ElementType

func (ValueValidationResponseOutput) ToValueValidationResponseOutput

func (o ValueValidationResponseOutput) ToValueValidationResponseOutput() ValueValidationResponseOutput

func (ValueValidationResponseOutput) ToValueValidationResponseOutputWithContext

func (o ValueValidationResponseOutput) ToValueValidationResponseOutputWithContext(ctx context.Context) ValueValidationResponseOutput

func (ValueValidationResponseOutput) Values

List of allowed values for the parameter.

type VirtualClusterConfig added in v0.18.2

type VirtualClusterConfig struct {
	// Optional. Configuration of auxiliary services used by this cluster.
	AuxiliaryServicesConfig *AuxiliaryServicesConfig `pulumi:"auxiliaryServicesConfig"`
	// The configuration for running the Dataproc cluster on Kubernetes.
	KubernetesClusterConfig KubernetesClusterConfig `pulumi:"kubernetesClusterConfig"`
	// Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.
	StagingBucket *string `pulumi:"stagingBucket"`
}

The Dataproc cluster config for a cluster that does not directly control the underlying compute resources, such as a Dataproc-on-GKE cluster (https://cloud.google.com/dataproc/docs/guides/dpgke/dataproc-gke-overview).

type VirtualClusterConfigArgs added in v0.18.2

type VirtualClusterConfigArgs struct {
	// Optional. Configuration of auxiliary services used by this cluster.
	AuxiliaryServicesConfig AuxiliaryServicesConfigPtrInput `pulumi:"auxiliaryServicesConfig"`
	// The configuration for running the Dataproc cluster on Kubernetes.
	KubernetesClusterConfig KubernetesClusterConfigInput `pulumi:"kubernetesClusterConfig"`
	// Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.
	StagingBucket pulumi.StringPtrInput `pulumi:"stagingBucket"`
}

The Dataproc cluster config for a cluster that does not directly control the underlying compute resources, such as a Dataproc-on-GKE cluster (https://cloud.google.com/dataproc/docs/guides/dpgke/dataproc-gke-overview).
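A minimal sketch of a VirtualClusterConfigArgs value for a Dataproc-on-GKE cluster, under the same import assumptions as above. The nested KubernetesClusterConfigArgs and GkeClusterConfigArgs field names mirror the underlying API (kubernetesNamespace, gkeClusterConfig, gkeClusterTarget) and are assumptions here, and the bucket, project, and cluster names are placeholders:

var virtualCluster = dataproc.VirtualClusterConfigArgs{
	// Bucket name only, not a gs:// URI (placeholder value).
	StagingBucket: pulumi.String("my-dataproc-staging-bucket"),
	KubernetesClusterConfig: dataproc.KubernetesClusterConfigArgs{
		// Namespace the Dataproc workloads are deployed into (assumed field name).
		KubernetesNamespace: pulumi.String("dataproc"),
		GkeClusterConfig: dataproc.GkeClusterConfigArgs{
			// Full resource name of an existing GKE cluster (assumed field name, placeholder value).
			GkeClusterTarget: pulumi.String("projects/my-project/locations/us-central1/clusters/my-gke-cluster"),
		},
	},
}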

func (VirtualClusterConfigArgs) ElementType added in v0.18.2

func (VirtualClusterConfigArgs) ElementType() reflect.Type

func (VirtualClusterConfigArgs) ToVirtualClusterConfigOutput added in v0.18.2

func (i VirtualClusterConfigArgs) ToVirtualClusterConfigOutput() VirtualClusterConfigOutput

func (VirtualClusterConfigArgs) ToVirtualClusterConfigOutputWithContext added in v0.18.2

func (i VirtualClusterConfigArgs) ToVirtualClusterConfigOutputWithContext(ctx context.Context) VirtualClusterConfigOutput

func (VirtualClusterConfigArgs) ToVirtualClusterConfigPtrOutput added in v0.18.2

func (i VirtualClusterConfigArgs) ToVirtualClusterConfigPtrOutput() VirtualClusterConfigPtrOutput

func (VirtualClusterConfigArgs) ToVirtualClusterConfigPtrOutputWithContext added in v0.18.2

func (i VirtualClusterConfigArgs) ToVirtualClusterConfigPtrOutputWithContext(ctx context.Context) VirtualClusterConfigPtrOutput

type VirtualClusterConfigInput added in v0.18.2

type VirtualClusterConfigInput interface {
	pulumi.Input

	ToVirtualClusterConfigOutput() VirtualClusterConfigOutput
	ToVirtualClusterConfigOutputWithContext(context.Context) VirtualClusterConfigOutput
}

VirtualClusterConfigInput is an input type that accepts VirtualClusterConfigArgs and VirtualClusterConfigOutput values. You can construct a concrete instance of `VirtualClusterConfigInput` via:

VirtualClusterConfigArgs{...}

type VirtualClusterConfigOutput added in v0.18.2

type VirtualClusterConfigOutput struct{ *pulumi.OutputState }

The Dataproc cluster config for a cluster that does not directly control the underlying compute resources, such as a Dataproc-on-GKE cluster (https://cloud.google.com/dataproc/docs/guides/dpgke/dataproc-gke-overview).

func (VirtualClusterConfigOutput) AuxiliaryServicesConfig added in v0.18.2

Optional. Configuration of auxiliary services used by this cluster.

func (VirtualClusterConfigOutput) ElementType added in v0.18.2

func (VirtualClusterConfigOutput) ElementType() reflect.Type

func (VirtualClusterConfigOutput) KubernetesClusterConfig added in v0.18.2

func (o VirtualClusterConfigOutput) KubernetesClusterConfig() KubernetesClusterConfigOutput

The configuration for running the Dataproc cluster on Kubernetes.

func (VirtualClusterConfigOutput) StagingBucket added in v0.18.2

Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.

func (VirtualClusterConfigOutput) ToVirtualClusterConfigOutput added in v0.18.2

func (o VirtualClusterConfigOutput) ToVirtualClusterConfigOutput() VirtualClusterConfigOutput

func (VirtualClusterConfigOutput) ToVirtualClusterConfigOutputWithContext added in v0.18.2

func (o VirtualClusterConfigOutput) ToVirtualClusterConfigOutputWithContext(ctx context.Context) VirtualClusterConfigOutput

func (VirtualClusterConfigOutput) ToVirtualClusterConfigPtrOutput added in v0.18.2

func (o VirtualClusterConfigOutput) ToVirtualClusterConfigPtrOutput() VirtualClusterConfigPtrOutput

func (VirtualClusterConfigOutput) ToVirtualClusterConfigPtrOutputWithContext added in v0.18.2

func (o VirtualClusterConfigOutput) ToVirtualClusterConfigPtrOutputWithContext(ctx context.Context) VirtualClusterConfigPtrOutput

type VirtualClusterConfigPtrInput added in v0.18.2

type VirtualClusterConfigPtrInput interface {
	pulumi.Input

	ToVirtualClusterConfigPtrOutput() VirtualClusterConfigPtrOutput
	ToVirtualClusterConfigPtrOutputWithContext(context.Context) VirtualClusterConfigPtrOutput
}

VirtualClusterConfigPtrInput is an input type that accepts VirtualClusterConfigArgs, VirtualClusterConfigPtr and VirtualClusterConfigPtrOutput values. You can construct a concrete instance of `VirtualClusterConfigPtrInput` via:

        VirtualClusterConfigArgs{...}

or:

        nil

func VirtualClusterConfigPtr added in v0.18.2

func VirtualClusterConfigPtr(v *VirtualClusterConfigArgs) VirtualClusterConfigPtrInput

type VirtualClusterConfigPtrOutput added in v0.18.2

type VirtualClusterConfigPtrOutput struct{ *pulumi.OutputState }

func (VirtualClusterConfigPtrOutput) AuxiliaryServicesConfig added in v0.18.2

Optional. Configuration of auxiliary services used by this cluster.

func (VirtualClusterConfigPtrOutput) Elem added in v0.18.2

func (VirtualClusterConfigPtrOutput) ElementType added in v0.18.2

func (VirtualClusterConfigPtrOutput) KubernetesClusterConfig added in v0.18.2

The configuration for running the Dataproc cluster on Kubernetes.

func (VirtualClusterConfigPtrOutput) StagingBucket added in v0.18.2

Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.

func (VirtualClusterConfigPtrOutput) ToVirtualClusterConfigPtrOutput added in v0.18.2

func (o VirtualClusterConfigPtrOutput) ToVirtualClusterConfigPtrOutput() VirtualClusterConfigPtrOutput

func (VirtualClusterConfigPtrOutput) ToVirtualClusterConfigPtrOutputWithContext added in v0.18.2

func (o VirtualClusterConfigPtrOutput) ToVirtualClusterConfigPtrOutputWithContext(ctx context.Context) VirtualClusterConfigPtrOutput

type VirtualClusterConfigResponse added in v0.18.2

type VirtualClusterConfigResponse struct {
	// Optional. Configuration of auxiliary services used by this cluster.
	AuxiliaryServicesConfig AuxiliaryServicesConfigResponse `pulumi:"auxiliaryServicesConfig"`
	// The configuration for running the Dataproc cluster on Kubernetes.
	KubernetesClusterConfig KubernetesClusterConfigResponse `pulumi:"kubernetesClusterConfig"`
	// Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.
	StagingBucket string `pulumi:"stagingBucket"`
}

The Dataproc cluster config for a cluster that does not directly control the underlying compute resources, such as a Dataproc-on-GKE cluster (https://cloud.google.com/dataproc/docs/guides/dpgke/dataproc-gke-overview).

type VirtualClusterConfigResponseOutput added in v0.18.2

type VirtualClusterConfigResponseOutput struct{ *pulumi.OutputState }

The Dataproc cluster config for a cluster that does not directly control the underlying compute resources, such as a Dataproc-on-GKE cluster (https://cloud.google.com/dataproc/docs/guides/dpgke/dataproc-gke-overview).

func (VirtualClusterConfigResponseOutput) AuxiliaryServicesConfig added in v0.18.2

Optional. Configuration of auxiliary services used by this cluster.

func (VirtualClusterConfigResponseOutput) ElementType added in v0.18.2

func (VirtualClusterConfigResponseOutput) KubernetesClusterConfig added in v0.18.2

The configuration for running the Dataproc cluster on Kubernetes.

func (VirtualClusterConfigResponseOutput) StagingBucket added in v0.18.2

Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.

func (VirtualClusterConfigResponseOutput) ToVirtualClusterConfigResponseOutput added in v0.18.2

func (o VirtualClusterConfigResponseOutput) ToVirtualClusterConfigResponseOutput() VirtualClusterConfigResponseOutput

func (VirtualClusterConfigResponseOutput) ToVirtualClusterConfigResponseOutputWithContext added in v0.18.2

func (o VirtualClusterConfigResponseOutput) ToVirtualClusterConfigResponseOutputWithContext(ctx context.Context) VirtualClusterConfigResponseOutput

type WorkflowTemplate

type WorkflowTemplate struct {
	pulumi.CustomResourceState

	// The time template was created.
	CreateTime pulumi.StringOutput `pulumi:"createTime"`
	// Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted.
	DagTimeout pulumi.StringOutput `pulumi:"dagTimeout"`
	// Optional. Encryption settings for encrypting customer core content.
	EncryptionConfig GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigResponseOutput `pulumi:"encryptionConfig"`
	// The Directed Acyclic Graph of Jobs to submit.
	Jobs OrderedJobResponseArrayOutput `pulumi:"jobs"`
	// Optional. The labels to associate with this template. These labels will be propagated to all jobs and clusters created by the workflow instance. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a template.
	Labels   pulumi.StringMapOutput `pulumi:"labels"`
	Location pulumi.StringOutput    `pulumi:"location"`
	// The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}
	Name pulumi.StringOutput `pulumi:"name"`
	// Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated.
	Parameters TemplateParameterResponseArrayOutput `pulumi:"parameters"`
	// WorkflowTemplate scheduling information.
	Placement WorkflowTemplatePlacementResponseOutput `pulumi:"placement"`
	Project   pulumi.StringOutput                     `pulumi:"project"`
	// The time template was last updated.
	UpdateTime pulumi.StringOutput `pulumi:"updateTime"`
	// Optional. Used to perform a consistent read-modify-write. This field should be left blank for a CreateWorkflowTemplate request. It is required for an UpdateWorkflowTemplate request, and must match the current server version. A typical update template flow would fetch the current template with a GetWorkflowTemplate request, which will return the current template with the version field filled in with the current server version. The user updates other fields in the template, then returns it as part of the UpdateWorkflowTemplate request.
	Version pulumi.IntOutput `pulumi:"version"`
}

Creates new workflow template. Auto-naming is currently not supported for this resource.
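To show the pieces above working together, here is a minimal sketch of a Pulumi program that registers a WorkflowTemplate. The import paths, template id, region, labels, and query are placeholders, and the nested WorkflowTemplatePlacementArgs, ClusterSelectorArgs, OrderedJobArgs, and QueryListArgs field names mirror the underlying API rather than being copied from this page, so treat them as assumptions.

package main

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Auto-naming is not supported, so the template id is supplied explicitly.
		_, err := dataproc.NewWorkflowTemplate(ctx, "example-template", &dataproc.WorkflowTemplateArgs{
			Id:       pulumi.String("example-template"),
			Location: pulumi.String("us-central1"),
			// Run the workflow on an existing cluster selected by label (placeholder label).
			Placement: dataproc.WorkflowTemplatePlacementArgs{
				ClusterSelector: dataproc.ClusterSelectorArgs{
					ClusterLabels: pulumi.StringMap{
						"env": pulumi.String("dev"),
					},
				},
			},
			// Single-step DAG running an inline Trino query.
			Jobs: dataproc.OrderedJobArray{
				dataproc.OrderedJobArgs{
					StepId: pulumi.String("trino-step"),
					TrinoJob: dataproc.TrinoJobArgs{
						QueryList: dataproc.QueryListArgs{
							Queries: pulumi.StringArray{pulumi.String("SELECT 1")},
						},
					},
				},
			},
			// Cancel remaining jobs if the DAG runs longer than 30 minutes.
			DagTimeout: pulumi.String("1800s"),
		})
		return err
	})
}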

func GetWorkflowTemplate

func GetWorkflowTemplate(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *WorkflowTemplateState, opts ...pulumi.ResourceOption) (*WorkflowTemplate, error)

GetWorkflowTemplate gets an existing WorkflowTemplate resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewWorkflowTemplate

func NewWorkflowTemplate(ctx *pulumi.Context,
	name string, args *WorkflowTemplateArgs, opts ...pulumi.ResourceOption) (*WorkflowTemplate, error)

NewWorkflowTemplate registers a new resource with the given unique name, arguments, and options.

func (*WorkflowTemplate) ElementType

func (*WorkflowTemplate) ElementType() reflect.Type

func (*WorkflowTemplate) ToWorkflowTemplateOutput

func (i *WorkflowTemplate) ToWorkflowTemplateOutput() WorkflowTemplateOutput

func (*WorkflowTemplate) ToWorkflowTemplateOutputWithContext

func (i *WorkflowTemplate) ToWorkflowTemplateOutputWithContext(ctx context.Context) WorkflowTemplateOutput

type WorkflowTemplateArgs

type WorkflowTemplateArgs struct {
	// Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted.
	DagTimeout pulumi.StringPtrInput
	// Optional. Encryption settings for encrypting customer core content.
	EncryptionConfig GoogleCloudDataprocV1WorkflowTemplateEncryptionConfigPtrInput
	Id               pulumi.StringPtrInput
	// The Directed Acyclic Graph of Jobs to submit.
	Jobs OrderedJobArrayInput
	// Optional. The labels to associate with this template. These labels will be propagated to all jobs and clusters created by the workflow instance. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a template.
	Labels   pulumi.StringMapInput
	Location pulumi.StringPtrInput
	// Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated.
	Parameters TemplateParameterArrayInput
	// WorkflowTemplate scheduling information.
	Placement WorkflowTemplatePlacementInput
	Project   pulumi.StringPtrInput
	// Optional. Used to perform a consistent read-modify-write. This field should be left blank for a CreateWorkflowTemplate request. It is required for an UpdateWorkflowTemplate request, and must match the current server version. A typical update template flow would fetch the current template with a GetWorkflowTemplate request, which will return the current template with the version field filled in with the current server version. The user updates other fields in the template, then returns it as part of the UpdateWorkflowTemplate request.
	Version pulumi.IntPtrInput
}

The set of arguments for constructing a WorkflowTemplate resource.

func (WorkflowTemplateArgs) ElementType

func (WorkflowTemplateArgs) ElementType() reflect.Type

type WorkflowTemplateIamBinding added in v0.26.0

type WorkflowTemplateIamBinding struct {
	pulumi.CustomResourceState

	// An IAM Condition for a given binding. See https://cloud.google.com/iam/docs/conditions-overview for additional details.
	Condition iam.ConditionPtrOutput `pulumi:"condition"`
	// The etag of the resource's IAM policy.
	Etag pulumi.StringOutput `pulumi:"etag"`
	// Specifies the principals requesting access for a Google Cloud resource. members can have the following values: allUsers: A special identifier that represents anyone who is on the internet; with or without a Google account. allAuthenticatedUsers: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. user:{emailid}: An email address that represents a specific Google account. For example, alice@example.com . serviceAccount:{emailid}: An email address that represents a Google service account. For example, my-other-app@appspot.gserviceaccount.com. serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]: An identifier for a Kubernetes service account (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, my-project.svc.id.goog[my-namespace/my-kubernetes-sa]. group:{emailid}: An email address that represents a Google group. For example, admins@example.com. domain:{domain}: The G Suite domain (primary) that represents all the users of that domain. For example, google.com or example.com. deleted:user:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a user that has been recently deleted. For example, alice@example.com?uid=123456789012345678901. If the user is recovered, this value reverts to user:{emailid} and the recovered user retains the role in the binding. deleted:serviceAccount:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901. If the service account is undeleted, this value reverts to serviceAccount:{emailid} and the undeleted service account retains the role in the binding. deleted:group:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, admins@example.com?uid=123456789012345678901. If the group is recovered, this value reverts to group:{emailid} and the recovered group retains the role in the binding.
	Members pulumi.StringArrayOutput `pulumi:"members"`
	// The name of the resource to manage IAM policies for.
	Name pulumi.StringOutput `pulumi:"name"`
	// The project in which the resource belongs. If it is not provided, a default will be supplied.
	Project pulumi.StringOutput `pulumi:"project"`
	// Role that is assigned to the list of members, or principals. For example, roles/viewer, roles/editor, or roles/owner.
	Role pulumi.StringOutput `pulumi:"role"`
}

Sets the access control policy on the specified resource. Replaces any existing policy. Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.

func GetWorkflowTemplateIamBinding added in v0.26.0

func GetWorkflowTemplateIamBinding(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *WorkflowTemplateIamBindingState, opts ...pulumi.ResourceOption) (*WorkflowTemplateIamBinding, error)

GetWorkflowTemplateIamBinding gets an existing WorkflowTemplateIamBinding resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewWorkflowTemplateIamBinding added in v0.26.0

func NewWorkflowTemplateIamBinding(ctx *pulumi.Context,
	name string, args *WorkflowTemplateIamBindingArgs, opts ...pulumi.ResourceOption) (*WorkflowTemplateIamBinding, error)

NewWorkflowTemplateIamBinding registers a new resource with the given unique name, arguments, and options.
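
A minimal creation sketch, assuming the usual pulumi-google-native Go SDK import path; the project, region, template name, and group address are placeholders:

package main

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Grant roles/viewer on an existing workflow template to everyone in a group.
		_, err := dataproc.NewWorkflowTemplateIamBinding(ctx, "template-viewers", &dataproc.WorkflowTemplateIamBindingArgs{
			Name:    pulumi.String("projects/my-project/regions/us-central1/workflowTemplates/my-template"),
			Role:    pulumi.String("roles/viewer"),
			Members: pulumi.StringArray{pulumi.String("group:admins@example.com")},
		})
		return err
	})
}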

func (*WorkflowTemplateIamBinding) ElementType added in v0.26.0

func (*WorkflowTemplateIamBinding) ElementType() reflect.Type

func (*WorkflowTemplateIamBinding) ToWorkflowTemplateIamBindingOutput added in v0.26.0

func (i *WorkflowTemplateIamBinding) ToWorkflowTemplateIamBindingOutput() WorkflowTemplateIamBindingOutput

func (*WorkflowTemplateIamBinding) ToWorkflowTemplateIamBindingOutputWithContext added in v0.26.0

func (i *WorkflowTemplateIamBinding) ToWorkflowTemplateIamBindingOutputWithContext(ctx context.Context) WorkflowTemplateIamBindingOutput

type WorkflowTemplateIamBindingArgs added in v0.26.0

type WorkflowTemplateIamBindingArgs struct {
	// An IAM Condition for a given binding.
	Condition iam.ConditionPtrInput
	// Identities that will be granted the privilege in role. Each entry can have one of the following values:
	//
	//  * user:{emailid}: An email address that represents a specific Google account. For example, alice@gmail.com or joe@example.com.
	//  * serviceAccount:{emailid}: An email address that represents a service account. For example, my-other-app@appspot.gserviceaccount.com.
	//  * group:{emailid}: An email address that represents a Google group. For example, admins@example.com.
	//  * domain:{domain}: A G Suite domain (primary, instead of alias) name that represents all the users of that domain. For example, google.com or example.com.
	Members pulumi.StringArrayInput
	// The name of the resource to manage IAM policies for.
	Name pulumi.StringInput
	// The role that should be applied. Only one `IamBinding` can be used per role.
	Role pulumi.StringInput
}

The set of arguments for constructing a WorkflowTemplateIamBinding resource.

func (WorkflowTemplateIamBindingArgs) ElementType added in v0.26.0

type WorkflowTemplateIamBindingInput added in v0.26.0

type WorkflowTemplateIamBindingInput interface {
	pulumi.Input

	ToWorkflowTemplateIamBindingOutput() WorkflowTemplateIamBindingOutput
	ToWorkflowTemplateIamBindingOutputWithContext(ctx context.Context) WorkflowTemplateIamBindingOutput
}

type WorkflowTemplateIamBindingOutput added in v0.26.0

type WorkflowTemplateIamBindingOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateIamBindingOutput) Condition added in v0.26.0

An IAM Condition for a given binding. See https://cloud.google.com/iam/docs/conditions-overview for additional details.

func (WorkflowTemplateIamBindingOutput) ElementType added in v0.26.0

func (WorkflowTemplateIamBindingOutput) Etag added in v0.26.0

The etag of the resource's IAM policy.

func (WorkflowTemplateIamBindingOutput) Members added in v0.26.0

Specifies the principals requesting access for a Google Cloud resource. members can have the following values:

* allUsers: A special identifier that represents anyone who is on the internet; with or without a Google account.
* allAuthenticatedUsers: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation.
* user:{emailid}: An email address that represents a specific Google account. For example, alice@example.com.
* serviceAccount:{emailid}: An email address that represents a Google service account. For example, my-other-app@appspot.gserviceaccount.com.
* serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]: An identifier for a Kubernetes service account (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, my-project.svc.id.goog[my-namespace/my-kubernetes-sa].
* group:{emailid}: An email address that represents a Google group. For example, admins@example.com.
* domain:{domain}: The G Suite domain (primary) that represents all the users of that domain. For example, google.com or example.com.
* deleted:user:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a user that has been recently deleted. For example, alice@example.com?uid=123456789012345678901. If the user is recovered, this value reverts to user:{emailid} and the recovered user retains the role in the binding.
* deleted:serviceAccount:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901. If the service account is undeleted, this value reverts to serviceAccount:{emailid} and the undeleted service account retains the role in the binding.
* deleted:group:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, admins@example.com?uid=123456789012345678901. If the group is recovered, this value reverts to group:{emailid} and the recovered group retains the role in the binding.

func (WorkflowTemplateIamBindingOutput) Name added in v0.26.0

The name of the resource to manage IAM policies for.

func (WorkflowTemplateIamBindingOutput) Project added in v0.26.0

The project in which the resource belongs. If it is not provided, a default will be supplied.

func (WorkflowTemplateIamBindingOutput) Role added in v0.26.0

Role that is assigned to the list of members, or principals. For example, roles/viewer, roles/editor, or roles/owner.

func (WorkflowTemplateIamBindingOutput) ToWorkflowTemplateIamBindingOutput added in v0.26.0

func (o WorkflowTemplateIamBindingOutput) ToWorkflowTemplateIamBindingOutput() WorkflowTemplateIamBindingOutput

func (WorkflowTemplateIamBindingOutput) ToWorkflowTemplateIamBindingOutputWithContext added in v0.26.0

func (o WorkflowTemplateIamBindingOutput) ToWorkflowTemplateIamBindingOutputWithContext(ctx context.Context) WorkflowTemplateIamBindingOutput

type WorkflowTemplateIamBindingState added in v0.26.0

type WorkflowTemplateIamBindingState struct {
}

func (WorkflowTemplateIamBindingState) ElementType added in v0.26.0

type WorkflowTemplateIamMember added in v0.26.0

type WorkflowTemplateIamMember struct {
	pulumi.CustomResourceState

	// An IAM Condition for a given binding. See https://cloud.google.com/iam/docs/conditions-overview for additional details.
	Condition iam.ConditionPtrOutput `pulumi:"condition"`
	// The etag of the resource's IAM policy.
	Etag pulumi.StringOutput `pulumi:"etag"`
	// Specifies the principals requesting access for a Google Cloud resource. members can have the following values:
	//
	//  * allUsers: A special identifier that represents anyone who is on the internet; with or without a Google account.
	//  * allAuthenticatedUsers: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation.
	//  * user:{emailid}: An email address that represents a specific Google account. For example, alice@example.com.
	//  * serviceAccount:{emailid}: An email address that represents a Google service account. For example, my-other-app@appspot.gserviceaccount.com.
	//  * serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]: An identifier for a Kubernetes service account (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, my-project.svc.id.goog[my-namespace/my-kubernetes-sa].
	//  * group:{emailid}: An email address that represents a Google group. For example, admins@example.com.
	//  * domain:{domain}: The G Suite domain (primary) that represents all the users of that domain. For example, google.com or example.com.
	//  * deleted:user:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a user that has been recently deleted. For example, alice@example.com?uid=123456789012345678901. If the user is recovered, this value reverts to user:{emailid} and the recovered user retains the role in the binding.
	//  * deleted:serviceAccount:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901. If the service account is undeleted, this value reverts to serviceAccount:{emailid} and the undeleted service account retains the role in the binding.
	//  * deleted:group:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, admins@example.com?uid=123456789012345678901. If the group is recovered, this value reverts to group:{emailid} and the recovered group retains the role in the binding.
	Member pulumi.StringOutput `pulumi:"member"`
	// The name of the resource to manage IAM policies for.
	Name pulumi.StringOutput `pulumi:"name"`
	// The project in which the resource belongs. If it is not provided, a default will be supplied.
	Project pulumi.StringOutput `pulumi:"project"`
	// Role that is assigned to the list of members, or principals. For example, roles/viewer, roles/editor, or roles/owner.
	Role pulumi.StringOutput `pulumi:"role"`
}

Sets the access control policy on the specified resource. Replaces any existing policy. Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.

func GetWorkflowTemplateIamMember added in v0.26.0

func GetWorkflowTemplateIamMember(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *WorkflowTemplateIamMemberState, opts ...pulumi.ResourceOption) (*WorkflowTemplateIamMember, error)

GetWorkflowTemplateIamMember gets an existing WorkflowTemplateIamMember resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewWorkflowTemplateIamMember added in v0.26.0

func NewWorkflowTemplateIamMember(ctx *pulumi.Context,
	name string, args *WorkflowTemplateIamMemberArgs, opts ...pulumi.ResourceOption) (*WorkflowTemplateIamMember, error)

NewWorkflowTemplateIamMember registers a new resource with the given unique name, arguments, and options.
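
A minimal creation sketch for a single principal, assuming the same SDK import path as in the binding example above; the resource name and service account address are placeholders:

package main

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Grant roles/viewer on an existing workflow template to one service account.
		_, err := dataproc.NewWorkflowTemplateIamMember(ctx, "template-viewer-sa", &dataproc.WorkflowTemplateIamMemberArgs{
			Name:   pulumi.String("projects/my-project/regions/us-central1/workflowTemplates/my-template"),
			Role:   pulumi.String("roles/viewer"),
			Member: pulumi.String("serviceAccount:my-other-app@appspot.gserviceaccount.com"),
		})
		return err
	})
}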

func (*WorkflowTemplateIamMember) ElementType added in v0.26.0

func (*WorkflowTemplateIamMember) ElementType() reflect.Type

func (*WorkflowTemplateIamMember) ToWorkflowTemplateIamMemberOutput added in v0.26.0

func (i *WorkflowTemplateIamMember) ToWorkflowTemplateIamMemberOutput() WorkflowTemplateIamMemberOutput

func (*WorkflowTemplateIamMember) ToWorkflowTemplateIamMemberOutputWithContext added in v0.26.0

func (i *WorkflowTemplateIamMember) ToWorkflowTemplateIamMemberOutputWithContext(ctx context.Context) WorkflowTemplateIamMemberOutput

type WorkflowTemplateIamMemberArgs added in v0.26.0

type WorkflowTemplateIamMemberArgs struct {
	// An IAM Condition for a given binding.
	Condition iam.ConditionPtrInput
	// Identity that will be granted the privilege in role. The entry can have one of the following values:
	//
	//  * user:{emailid}: An email address that represents a specific Google account. For example, alice@gmail.com or joe@example.com.
	//  * serviceAccount:{emailid}: An email address that represents a service account. For example, my-other-app@appspot.gserviceaccount.com.
	//  * group:{emailid}: An email address that represents a Google group. For example, admins@example.com.
	//  * domain:{domain}: A G Suite domain (primary, instead of alias) name that represents all the users of that domain. For example, google.com or example.com.
	Member pulumi.StringInput
	// The name of the resource to manage IAM policies for.
	Name pulumi.StringInput
	// The role that should be applied.
	Role pulumi.StringInput
}

The set of arguments for constructing a WorkflowTemplateIamMember resource.

func (WorkflowTemplateIamMemberArgs) ElementType added in v0.26.0

type WorkflowTemplateIamMemberInput added in v0.26.0

type WorkflowTemplateIamMemberInput interface {
	pulumi.Input

	ToWorkflowTemplateIamMemberOutput() WorkflowTemplateIamMemberOutput
	ToWorkflowTemplateIamMemberOutputWithContext(ctx context.Context) WorkflowTemplateIamMemberOutput
}

type WorkflowTemplateIamMemberOutput added in v0.26.0

type WorkflowTemplateIamMemberOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateIamMemberOutput) Condition added in v0.26.0

An IAM Condition for a given binding. See https://cloud.google.com/iam/docs/conditions-overview for additional details.

func (WorkflowTemplateIamMemberOutput) ElementType added in v0.26.0

func (WorkflowTemplateIamMemberOutput) Etag added in v0.26.0

The etag of the resource's IAM policy.

func (WorkflowTemplateIamMemberOutput) Member added in v0.26.0

Specifies the principals requesting access for a Google Cloud resource. members can have the following values:

* allUsers: A special identifier that represents anyone who is on the internet; with or without a Google account.
* allAuthenticatedUsers: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation.
* user:{emailid}: An email address that represents a specific Google account. For example, alice@example.com.
* serviceAccount:{emailid}: An email address that represents a Google service account. For example, my-other-app@appspot.gserviceaccount.com.
* serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]: An identifier for a Kubernetes service account (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, my-project.svc.id.goog[my-namespace/my-kubernetes-sa].
* group:{emailid}: An email address that represents a Google group. For example, admins@example.com.
* domain:{domain}: The G Suite domain (primary) that represents all the users of that domain. For example, google.com or example.com.
* deleted:user:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a user that has been recently deleted. For example, alice@example.com?uid=123456789012345678901. If the user is recovered, this value reverts to user:{emailid} and the recovered user retains the role in the binding.
* deleted:serviceAccount:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901. If the service account is undeleted, this value reverts to serviceAccount:{emailid} and the undeleted service account retains the role in the binding.
* deleted:group:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, admins@example.com?uid=123456789012345678901. If the group is recovered, this value reverts to group:{emailid} and the recovered group retains the role in the binding.

func (WorkflowTemplateIamMemberOutput) Name added in v0.26.0

The name of the resource to manage IAM policies for.

func (WorkflowTemplateIamMemberOutput) Project added in v0.26.0

The project in which the resource belongs. If it is not provided, a default will be supplied.

func (WorkflowTemplateIamMemberOutput) Role added in v0.26.0

Role that is assigned to the list of members, or principals. For example, roles/viewer, roles/editor, or roles/owner.

func (WorkflowTemplateIamMemberOutput) ToWorkflowTemplateIamMemberOutput added in v0.26.0

func (o WorkflowTemplateIamMemberOutput) ToWorkflowTemplateIamMemberOutput() WorkflowTemplateIamMemberOutput

func (WorkflowTemplateIamMemberOutput) ToWorkflowTemplateIamMemberOutputWithContext added in v0.26.0

func (o WorkflowTemplateIamMemberOutput) ToWorkflowTemplateIamMemberOutputWithContext(ctx context.Context) WorkflowTemplateIamMemberOutput

type WorkflowTemplateIamMemberState added in v0.26.0

type WorkflowTemplateIamMemberState struct {
}

func (WorkflowTemplateIamMemberState) ElementType added in v0.26.0

type WorkflowTemplateIamPolicy

type WorkflowTemplateIamPolicy struct {
	pulumi.CustomResourceState

	// Associates a list of members, or principals, with a role. Optionally, may specify a condition that determines how and when the bindings are applied. Each of the bindings must contain at least one principal. The bindings in a Policy can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the bindings grant 50 different roles to user:alice@example.com, and not to any other principal, then you can add another 1,450 principals to the bindings in the Policy.
	Bindings BindingResponseArrayOutput `pulumi:"bindings"`
	// etag is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the etag in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An etag is returned in the response to getIamPolicy, and systems are expected to put that etag in the request to setIamPolicy to ensure that their change will be applied to the same version of the policy. Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.
	Etag     pulumi.StringOutput `pulumi:"etag"`
	Location pulumi.StringOutput `pulumi:"location"`
	Project  pulumi.StringOutput `pulumi:"project"`
	// Specifies the format of the policy. Valid values are 0, 1, and 3. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version 3. This requirement applies to the following operations:
	//
	//  * Getting a policy that includes a conditional role binding
	//  * Adding a conditional role binding to a policy
	//  * Changing a conditional role binding in a policy
	//  * Removing any role binding, with or without a condition, from a policy that includes conditions
	//
	// Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).
	Version            pulumi.IntOutput    `pulumi:"version"`
	WorkflowTemplateId pulumi.StringOutput `pulumi:"workflowTemplateId"`
}

Sets the access control policy on the specified resource. Replaces any existing policy. Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors. Note: this resource's API doesn't support deletion. When deleted, the resource will persist on Google Cloud even though it will be deleted from Pulumi state.

func GetWorkflowTemplateIamPolicy

func GetWorkflowTemplateIamPolicy(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *WorkflowTemplateIamPolicyState, opts ...pulumi.ResourceOption) (*WorkflowTemplateIamPolicy, error)

GetWorkflowTemplateIamPolicy gets an existing WorkflowTemplateIamPolicy resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewWorkflowTemplateIamPolicy

func NewWorkflowTemplateIamPolicy(ctx *pulumi.Context,
	name string, args *WorkflowTemplateIamPolicyArgs, opts ...pulumi.ResourceOption) (*WorkflowTemplateIamPolicy, error)

NewWorkflowTemplateIamPolicy registers a new resource with the given unique name, arguments, and options.

func (*WorkflowTemplateIamPolicy) ElementType

func (*WorkflowTemplateIamPolicy) ElementType() reflect.Type

func (*WorkflowTemplateIamPolicy) ToWorkflowTemplateIamPolicyOutput

func (i *WorkflowTemplateIamPolicy) ToWorkflowTemplateIamPolicyOutput() WorkflowTemplateIamPolicyOutput

func (*WorkflowTemplateIamPolicy) ToWorkflowTemplateIamPolicyOutputWithContext

func (i *WorkflowTemplateIamPolicy) ToWorkflowTemplateIamPolicyOutputWithContext(ctx context.Context) WorkflowTemplateIamPolicyOutput

type WorkflowTemplateIamPolicyArgs

type WorkflowTemplateIamPolicyArgs struct {
	// Associates a list of members, or principals, with a role. Optionally, may specify a condition that determines how and when the bindings are applied. Each of the bindings must contain at least one principal. The bindings in a Policy can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the bindings grant 50 different roles to user:alice@example.com, and not to any other principal, then you can add another 1,450 principals to the bindings in the Policy.
	Bindings BindingArrayInput
	// etag is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the etag in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An etag is returned in the response to getIamPolicy, and systems are expected to put that etag in the request to setIamPolicy to ensure that their change will be applied to the same version of the policy. Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.
	Etag     pulumi.StringPtrInput
	Location pulumi.StringPtrInput
	Project  pulumi.StringPtrInput
	// Specifies the format of the policy. Valid values are 0, 1, and 3. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version 3. This requirement applies to the following operations:
	//
	//  * Getting a policy that includes a conditional role binding
	//  * Adding a conditional role binding to a policy
	//  * Changing a conditional role binding in a policy
	//  * Removing any role binding, with or without a condition, from a policy that includes conditions
	//
	// Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).
	Version            pulumi.IntPtrInput
	WorkflowTemplateId pulumi.StringInput
}

The set of arguments for constructing a WorkflowTemplateIamPolicy resource.
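
A sketch that replaces a template's entire policy with a single binding. It assumes the BindingArray/BindingArgs input types (with Role and Members fields) generated elsewhere in this package; the location, template ID, role, and group are placeholders, and the project falls back to the provider default. Keep in mind that deleting this resource does not remove the policy from Google Cloud.

package main

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Replace the whole IAM policy on a workflow template with one binding.
		_, err := dataproc.NewWorkflowTemplateIamPolicy(ctx, "template-policy", &dataproc.WorkflowTemplateIamPolicyArgs{
			Location:           pulumi.String("us-central1"),
			WorkflowTemplateId: pulumi.String("my-template"),
			Bindings: dataproc.BindingArray{
				&dataproc.BindingArgs{
					Role:    pulumi.String("roles/viewer"),
					Members: pulumi.StringArray{pulumi.String("group:admins@example.com")},
				},
			},
		})
		return err
	})
}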

func (WorkflowTemplateIamPolicyArgs) ElementType

type WorkflowTemplateIamPolicyInput

type WorkflowTemplateIamPolicyInput interface {
	pulumi.Input

	ToWorkflowTemplateIamPolicyOutput() WorkflowTemplateIamPolicyOutput
	ToWorkflowTemplateIamPolicyOutputWithContext(ctx context.Context) WorkflowTemplateIamPolicyOutput
}

type WorkflowTemplateIamPolicyOutput

type WorkflowTemplateIamPolicyOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateIamPolicyOutput) Bindings added in v0.19.0

Associates a list of members, or principals, with a role. Optionally, may specify a condition that determines how and when the bindings are applied. Each of the bindings must contain at least one principal. The bindings in a Policy can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the bindings grant 50 different roles to user:alice@example.com, and not to any other principal, then you can add another 1,450 principals to the bindings in the Policy.

func (WorkflowTemplateIamPolicyOutput) ElementType

func (WorkflowTemplateIamPolicyOutput) Etag added in v0.19.0

etag is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the etag in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An etag is returned in the response to getIamPolicy, and systems are expected to put that etag in the request to setIamPolicy to ensure that their change will be applied to the same version of the policy. Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.

func (WorkflowTemplateIamPolicyOutput) Location added in v0.21.0

func (WorkflowTemplateIamPolicyOutput) Project added in v0.21.0

func (WorkflowTemplateIamPolicyOutput) ToWorkflowTemplateIamPolicyOutput

func (o WorkflowTemplateIamPolicyOutput) ToWorkflowTemplateIamPolicyOutput() WorkflowTemplateIamPolicyOutput

func (WorkflowTemplateIamPolicyOutput) ToWorkflowTemplateIamPolicyOutputWithContext

func (o WorkflowTemplateIamPolicyOutput) ToWorkflowTemplateIamPolicyOutputWithContext(ctx context.Context) WorkflowTemplateIamPolicyOutput

func (WorkflowTemplateIamPolicyOutput) Version added in v0.19.0

Specifies the format of the policy. Valid values are 0, 1, and 3. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version 3. This requirement applies to the following operations:

* Getting a policy that includes a conditional role binding
* Adding a conditional role binding to a policy
* Changing a conditional role binding in a policy
* Removing any role binding, with or without a condition, from a policy that includes conditions

Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).

func (WorkflowTemplateIamPolicyOutput) WorkflowTemplateId added in v0.21.0

func (o WorkflowTemplateIamPolicyOutput) WorkflowTemplateId() pulumi.StringOutput

type WorkflowTemplateIamPolicyState

type WorkflowTemplateIamPolicyState struct {
}

func (WorkflowTemplateIamPolicyState) ElementType

type WorkflowTemplateInput

type WorkflowTemplateInput interface {
	pulumi.Input

	ToWorkflowTemplateOutput() WorkflowTemplateOutput
	ToWorkflowTemplateOutputWithContext(ctx context.Context) WorkflowTemplateOutput
}

type WorkflowTemplateOutput

type WorkflowTemplateOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateOutput) CreateTime added in v0.19.0

The time the template was created.

func (WorkflowTemplateOutput) DagTimeout added in v0.19.0

Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted.
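
A tiny illustrative helper (the function name is made up) for producing the seconds-string form described above:

package main

import (
	"fmt"
	"time"
)

// dagTimeoutString renders a Go duration as the JSON-style seconds string Dataproc
// expects for DagTimeout; valid values run from "600s" (10 minutes) to "86400s" (24 hours).
func dagTimeoutString(d time.Duration) string {
	return fmt.Sprintf("%ds", int64(d.Seconds()))
}

func main() {
	fmt.Println(dagTimeoutString(2 * time.Hour)) // "7200s"
}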

func (WorkflowTemplateOutput) ElementType

func (WorkflowTemplateOutput) ElementType() reflect.Type

func (WorkflowTemplateOutput) EncryptionConfig added in v0.32.0

Optional. Encryption settings for encrypting customer core content.

func (WorkflowTemplateOutput) Jobs added in v0.19.0

The Directed Acyclic Graph of Jobs to submit.

func (WorkflowTemplateOutput) Labels added in v0.19.0

Optional. The labels to associate with this template. These labels will be propagated to all jobs and clusters created by the workflow instance. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a template.

func (WorkflowTemplateOutput) Location added in v0.21.0

func (WorkflowTemplateOutput) Name added in v0.19.0

The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names.

* For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}
* For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}

func (WorkflowTemplateOutput) Parameters added in v0.19.0

Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated.

func (WorkflowTemplateOutput) Placement added in v0.19.0

WorkflowTemplate scheduling information.

func (WorkflowTemplateOutput) Project added in v0.21.0

func (WorkflowTemplateOutput) ToWorkflowTemplateOutput

func (o WorkflowTemplateOutput) ToWorkflowTemplateOutput() WorkflowTemplateOutput

func (WorkflowTemplateOutput) ToWorkflowTemplateOutputWithContext

func (o WorkflowTemplateOutput) ToWorkflowTemplateOutputWithContext(ctx context.Context) WorkflowTemplateOutput

func (WorkflowTemplateOutput) UpdateTime added in v0.19.0

The time the template was last updated.

func (WorkflowTemplateOutput) Version added in v0.19.0

Optional. Used to perform a consistent read-modify-write. This field should be left blank for a CreateWorkflowTemplate request. It is required for an UpdateWorkflowTemplate request, and must match the current server version. A typical update template flow would fetch the current template with a GetWorkflowTemplate request, which will return the current template with the version field filled in with the current server version. The user updates other fields in the template, then returns it as part of the UpdateWorkflowTemplate request.

type WorkflowTemplatePlacement

type WorkflowTemplatePlacement struct {
	// Optional. A selector that chooses target cluster for jobs based on metadata. The selector is evaluated at the time each job is submitted.
	ClusterSelector *ClusterSelector `pulumi:"clusterSelector"`
	// A cluster that is managed by the workflow.
	ManagedCluster *ManagedCluster `pulumi:"managedCluster"`
}

Specifies workflow execution target. Either managed_cluster or cluster_selector is required.

type WorkflowTemplatePlacementArgs

type WorkflowTemplatePlacementArgs struct {
	// Optional. A selector that chooses target cluster for jobs based on metadata. The selector is evaluated at the time each job is submitted.
	ClusterSelector ClusterSelectorPtrInput `pulumi:"clusterSelector"`
	// A cluster that is managed by the workflow.
	ManagedCluster ManagedClusterPtrInput `pulumi:"managedCluster"`
}

Specifies workflow execution target. Either managed_cluster or cluster_selector is required.
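
An illustrative sketch of the cluster_selector branch. It assumes the ClusterSelectorArgs input type (with a ClusterLabels field) defined elsewhere in this package; the label key and value are placeholders.

package example

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// selectorPlacement routes every job in the workflow to an existing cluster picked by
// label instead of letting the workflow create a managed cluster.
func selectorPlacement() dataproc.WorkflowTemplatePlacementArgs {
	return dataproc.WorkflowTemplatePlacementArgs{
		ClusterSelector: &dataproc.ClusterSelectorArgs{
			ClusterLabels: pulumi.StringMap{"env": pulumi.String("staging")},
		},
	}
}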

func (WorkflowTemplatePlacementArgs) ElementType

func (WorkflowTemplatePlacementArgs) ToWorkflowTemplatePlacementOutput

func (i WorkflowTemplatePlacementArgs) ToWorkflowTemplatePlacementOutput() WorkflowTemplatePlacementOutput

func (WorkflowTemplatePlacementArgs) ToWorkflowTemplatePlacementOutputWithContext

func (i WorkflowTemplatePlacementArgs) ToWorkflowTemplatePlacementOutputWithContext(ctx context.Context) WorkflowTemplatePlacementOutput

type WorkflowTemplatePlacementInput

type WorkflowTemplatePlacementInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementOutput() WorkflowTemplatePlacementOutput
	ToWorkflowTemplatePlacementOutputWithContext(context.Context) WorkflowTemplatePlacementOutput
}

WorkflowTemplatePlacementInput is an input type that accepts WorkflowTemplatePlacementArgs and WorkflowTemplatePlacementOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementInput` via:

WorkflowTemplatePlacementArgs{...}

type WorkflowTemplatePlacementOutput

type WorkflowTemplatePlacementOutput struct{ *pulumi.OutputState }

Specifies workflow execution target. Either managed_cluster or cluster_selector is required.

func (WorkflowTemplatePlacementOutput) ClusterSelector

Optional. A selector that chooses target cluster for jobs based on metadata. The selector is evaluated at the time each job is submitted.

func (WorkflowTemplatePlacementOutput) ElementType

func (WorkflowTemplatePlacementOutput) ManagedCluster

A cluster that is managed by the workflow.

func (WorkflowTemplatePlacementOutput) ToWorkflowTemplatePlacementOutput

func (o WorkflowTemplatePlacementOutput) ToWorkflowTemplatePlacementOutput() WorkflowTemplatePlacementOutput

func (WorkflowTemplatePlacementOutput) ToWorkflowTemplatePlacementOutputWithContext

func (o WorkflowTemplatePlacementOutput) ToWorkflowTemplatePlacementOutputWithContext(ctx context.Context) WorkflowTemplatePlacementOutput

type WorkflowTemplatePlacementResponse

type WorkflowTemplatePlacementResponse struct {
	// Optional. A selector that chooses target cluster for jobs based on metadata. The selector is evaluated at the time each job is submitted.
	ClusterSelector ClusterSelectorResponse `pulumi:"clusterSelector"`
	// A cluster that is managed by the workflow.
	ManagedCluster ManagedClusterResponse `pulumi:"managedCluster"`
}

Specifies workflow execution target. Either managed_cluster or cluster_selector is required.

type WorkflowTemplatePlacementResponseOutput

type WorkflowTemplatePlacementResponseOutput struct{ *pulumi.OutputState }

Specifies workflow execution target. Either managed_cluster or cluster_selector is required.

func (WorkflowTemplatePlacementResponseOutput) ClusterSelector

Optional. A selector that chooses target cluster for jobs based on metadata. The selector is evaluated at the time each job is submitted.

func (WorkflowTemplatePlacementResponseOutput) ElementType

func (WorkflowTemplatePlacementResponseOutput) ManagedCluster

A cluster that is managed by the workflow.

func (WorkflowTemplatePlacementResponseOutput) ToWorkflowTemplatePlacementResponseOutput

func (o WorkflowTemplatePlacementResponseOutput) ToWorkflowTemplatePlacementResponseOutput() WorkflowTemplatePlacementResponseOutput

func (WorkflowTemplatePlacementResponseOutput) ToWorkflowTemplatePlacementResponseOutputWithContext

func (o WorkflowTemplatePlacementResponseOutput) ToWorkflowTemplatePlacementResponseOutputWithContext(ctx context.Context) WorkflowTemplatePlacementResponseOutput

type WorkflowTemplateState

type WorkflowTemplateState struct {
}

func (WorkflowTemplateState) ElementType

func (WorkflowTemplateState) ElementType() reflect.Type

type YarnApplicationResponse

type YarnApplicationResponse struct {
	// The application name.
	Name string `pulumi:"name"`
	// The numerical progress of the application, from 1 to 100.
	Progress float64 `pulumi:"progress"`
	// The application state.
	State string `pulumi:"state"`
	// Optional. The HTTP URL of the ApplicationMaster, HistoryServer, or TimelineServer that provides application-specific information. The URL uses the internal hostname, and requires a proxy server for resolution and, possibly, access.
	TrackingUrl string `pulumi:"trackingUrl"`
}

A YARN application created by a job. Application information is a subset of org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto. Beta Feature: This report is available for testing purposes only. It may be changed before final release.

type YarnApplicationResponseArrayOutput

type YarnApplicationResponseArrayOutput struct{ *pulumi.OutputState }

func (YarnApplicationResponseArrayOutput) ElementType

func (YarnApplicationResponseArrayOutput) Index

func (YarnApplicationResponseArrayOutput) ToYarnApplicationResponseArrayOutput

func (o YarnApplicationResponseArrayOutput) ToYarnApplicationResponseArrayOutput() YarnApplicationResponseArrayOutput

func (YarnApplicationResponseArrayOutput) ToYarnApplicationResponseArrayOutputWithContext

func (o YarnApplicationResponseArrayOutput) ToYarnApplicationResponseArrayOutputWithContext(ctx context.Context) YarnApplicationResponseArrayOutput

type YarnApplicationResponseOutput

type YarnApplicationResponseOutput struct{ *pulumi.OutputState }

A YARN application created by a job. Application information is a subset of org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto. Beta Feature: This report is available for testing purposes only. It may be changed before final release.

func (YarnApplicationResponseOutput) ElementType

func (YarnApplicationResponseOutput) Name

The application name.

func (YarnApplicationResponseOutput) Progress

The numerical progress of the application, from 1 to 100.

func (YarnApplicationResponseOutput) State

The application state.

func (YarnApplicationResponseOutput) ToYarnApplicationResponseOutput

func (o YarnApplicationResponseOutput) ToYarnApplicationResponseOutput() YarnApplicationResponseOutput

func (YarnApplicationResponseOutput) ToYarnApplicationResponseOutputWithContext

func (o YarnApplicationResponseOutput) ToYarnApplicationResponseOutputWithContext(ctx context.Context) YarnApplicationResponseOutput

func (YarnApplicationResponseOutput) TrackingUrl

Optional. The HTTP URL of the ApplicationMaster, HistoryServer, or TimelineServer that provides application-specific information. The URL uses the internal hostname, and requires a proxy server for resolution and, possibly, access.
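
A small helper sketch (the function name and setup are illustrative) showing how these response outputs are typically consumed, for example from a job's YarnApplications property:

package example

import (
	dataproc "github.com/pulumi/pulumi-google-native/sdk/go/google/dataproc/v1"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// firstTrackingUrl pulls the tracking URL of the first YARN application, if any,
// out of a YarnApplicationResponseArrayOutput.
func firstTrackingUrl(apps dataproc.YarnApplicationResponseArrayOutput) pulumi.StringOutput {
	return apps.ApplyT(func(a []dataproc.YarnApplicationResponse) string {
		if len(a) == 0 {
			return ""
		}
		return a[0].TrackingUrl
	}).(pulumi.StringOutput)
}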
