dli

package v0.0.8

Published: Jul 28, 2023 License: Apache-2.0 Imports: 11 Imported by: 0

Documentation

Index

Constants

This section is empty.

Variables

This section is empty.

Functions

func PkgVersion added in v0.0.2

func PkgVersion() (semver.Version, error)

PkgVersion uses reflection to determine the version of the current package. If a version cannot be determined, v1 will be assumed. The second return value is always nil.
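
A minimal sketch of calling PkgVersion directly; the import path and the `Dli` package identifier simply mirror the resource examples further down this page.

```go
package main

import (
	"fmt"

	"github.com/huaweicloud/pulumi-huaweicloud/sdk/go/huaweicloud/Dli"
)

func main() {
	// The error is documented as always nil, but checking it keeps the call idiomatic.
	version, err := Dli.PkgVersion()
	if err != nil {
		panic(err)
	}
	fmt.Printf("dli SDK version: %s\n", version)
}
```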

Types

type Database

type Database struct {
	pulumi.CustomResourceState

	// Specifies the description of the database.
	// Changing this parameter will create a new database resource.
	Description pulumi.StringPtrOutput `pulumi:"description"`
	// Specifies the enterprise project ID.
	// The value 0 indicates the default enterprise project. Changing this parameter will create a new database resource.
	EnterpriseProjectId pulumi.StringOutput `pulumi:"enterpriseProjectId"`
	// Specifies the database name. The name consists of 1 to 128 characters, starting
	// with a letter or digit. Only letters, digits and underscores (_) are allowed and the name cannot be all digits.
	// Changing this parameter will create a new database resource.
	Name pulumi.StringOutput `pulumi:"name"`
	// Specifies the name of the SQL database owner.
	// The owner must be an IAM user.
	Owner pulumi.StringOutput `pulumi:"owner"`
	// Specifies the region in which to create the DLI database resource.
	// If omitted, the provider-level region will be used. Changing this parameter will create a new database resource.
	Region pulumi.StringOutput `pulumi:"region"`
}

Manages a DLI SQL database resource within HuaweiCloud.

## Example Usage

### Create a database

```go
package main

import (
	"github.com/huaweicloud/pulumi-huaweicloud/sdk/go/huaweicloud/Dli"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		cfg := config.New(ctx, "")
		// Read the database name from stack configuration and use it for the resource.
		databaseName := cfg.Require("databaseName")
		_, err := Dli.NewDatabase(ctx, "test", &Dli.DatabaseArgs{
			Name: pulumi.String(databaseName),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## Import

DLI SQL databases can be imported by their `name`, e.g.

```sh

$ pulumi import huaweicloud:Dli/database:Database test terraform_test

```

func GetDatabase

func GetDatabase(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *DatabaseState, opts ...pulumi.ResourceOption) (*Database, error)

GetDatabase gets an existing Database resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).
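
A minimal sketch of re-attaching to an existing database from inside a Pulumi program; the ID `terraform_test` is a placeholder borrowed from the import example above, and the nil DatabaseState means no extra lookup qualifiers are supplied.

```go
package main

import (
	"github.com/huaweicloud/pulumi-huaweicloud/sdk/go/huaweicloud/Dli"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Look up the previously created database by its ID.
		db, err := Dli.GetDatabase(ctx, "existing", pulumi.ID("terraform_test"), nil)
		if err != nil {
			return err
		}
		ctx.Export("databaseOwner", db.Owner)
		return nil
	})
}
```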

func NewDatabase

func NewDatabase(ctx *pulumi.Context,
	name string, args *DatabaseArgs, opts ...pulumi.ResourceOption) (*Database, error)

NewDatabase registers a new resource with the given unique name, arguments, and options.

func (*Database) ElementType

func (*Database) ElementType() reflect.Type

func (*Database) ToDatabaseOutput

func (i *Database) ToDatabaseOutput() DatabaseOutput

func (*Database) ToDatabaseOutputWithContext

func (i *Database) ToDatabaseOutputWithContext(ctx context.Context) DatabaseOutput

type DatabaseArgs

type DatabaseArgs struct {
	// Specifies the description of the database.
	// Changing this parameter will create a new database resource.
	Description pulumi.StringPtrInput
	// Specifies the enterprise project ID.
	// The value 0 indicates the default enterprise project. Changing this parameter will create a new database resource.
	EnterpriseProjectId pulumi.StringPtrInput
	// Specifies the database name. The name consists of 1 to 128 characters, starting
	// with a letter or digit. Only letters, digits and underscores (_) are allowed and the name cannot be all digits.
	// Changing this parameter will create a new database resource.
	Name pulumi.StringPtrInput
	// Specifies the name of the SQL database owner.
	// The owner must be an IAM user.
	Owner pulumi.StringPtrInput
	// Specifies the region in which to create the DLI database resource.
	// If omitted, the provider-level region will be used. Changing this parameter will create a new database resource.
	Region pulumi.StringPtrInput
}

The set of arguments for constructing a Database resource.

func (DatabaseArgs) ElementType

func (DatabaseArgs) ElementType() reflect.Type

type DatabaseArray

type DatabaseArray []DatabaseInput

func (DatabaseArray) ElementType

func (DatabaseArray) ElementType() reflect.Type

func (DatabaseArray) ToDatabaseArrayOutput

func (i DatabaseArray) ToDatabaseArrayOutput() DatabaseArrayOutput

func (DatabaseArray) ToDatabaseArrayOutputWithContext

func (i DatabaseArray) ToDatabaseArrayOutputWithContext(ctx context.Context) DatabaseArrayOutput

type DatabaseArrayInput

type DatabaseArrayInput interface {
	pulumi.Input

	ToDatabaseArrayOutput() DatabaseArrayOutput
	ToDatabaseArrayOutputWithContext(context.Context) DatabaseArrayOutput
}

DatabaseArrayInput is an input type that accepts DatabaseArray and DatabaseArrayOutput values. You can construct a concrete instance of `DatabaseArrayInput` via:

DatabaseArray{ DatabaseArgs{...} }
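
Because a `*Database` resource satisfies DatabaseInput, a DatabaseArray can also be built directly from resources. A hedged sketch (the database names are placeholders):

```go
package main

import (
	"github.com/huaweicloud/pulumi-huaweicloud/sdk/go/huaweicloud/Dli"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		first, err := Dli.NewDatabase(ctx, "first", &Dli.DatabaseArgs{
			Name: pulumi.String("analytics_db"),
		})
		if err != nil {
			return err
		}
		second, err := Dli.NewDatabase(ctx, "second", &Dli.DatabaseArgs{
			Name: pulumi.String("reporting_db"),
		})
		if err != nil {
			return err
		}
		// DatabaseArray elements are DatabaseInput values, such as the resources above.
		all := Dli.DatabaseArray{first, second}
		// Index returns a DatabaseOutput for the element at the given position.
		ctx.Export("firstOwner", all.ToDatabaseArrayOutput().Index(pulumi.Int(0)).Owner())
		return nil
	})
}
```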

type DatabaseArrayOutput

type DatabaseArrayOutput struct{ *pulumi.OutputState }

func (DatabaseArrayOutput) ElementType

func (DatabaseArrayOutput) ElementType() reflect.Type

func (DatabaseArrayOutput) Index

func (o DatabaseArrayOutput) Index(i pulumi.IntInput) DatabaseOutput

func (DatabaseArrayOutput) ToDatabaseArrayOutput

func (o DatabaseArrayOutput) ToDatabaseArrayOutput() DatabaseArrayOutput

func (DatabaseArrayOutput) ToDatabaseArrayOutputWithContext

func (o DatabaseArrayOutput) ToDatabaseArrayOutputWithContext(ctx context.Context) DatabaseArrayOutput

type DatabaseInput

type DatabaseInput interface {
	pulumi.Input

	ToDatabaseOutput() DatabaseOutput
	ToDatabaseOutputWithContext(ctx context.Context) DatabaseOutput
}

type DatabaseMap

type DatabaseMap map[string]DatabaseInput

func (DatabaseMap) ElementType

func (DatabaseMap) ElementType() reflect.Type

func (DatabaseMap) ToDatabaseMapOutput

func (i DatabaseMap) ToDatabaseMapOutput() DatabaseMapOutput

func (DatabaseMap) ToDatabaseMapOutputWithContext

func (i DatabaseMap) ToDatabaseMapOutputWithContext(ctx context.Context) DatabaseMapOutput

type DatabaseMapInput

type DatabaseMapInput interface {
	pulumi.Input

	ToDatabaseMapOutput() DatabaseMapOutput
	ToDatabaseMapOutputWithContext(context.Context) DatabaseMapOutput
}

DatabaseMapInput is an input type that accepts DatabaseMap and DatabaseMapOutput values. You can construct a concrete instance of `DatabaseMapInput` via:

DatabaseMap{ "key": DatabaseArgs{...} }

type DatabaseMapOutput

type DatabaseMapOutput struct{ *pulumi.OutputState }

func (DatabaseMapOutput) ElementType

func (DatabaseMapOutput) ElementType() reflect.Type

func (DatabaseMapOutput) MapIndex

func (o DatabaseMapOutput) MapIndex(k pulumi.StringInput) DatabaseOutput

func (DatabaseMapOutput) ToDatabaseMapOutput

func (o DatabaseMapOutput) ToDatabaseMapOutput() DatabaseMapOutput

func (DatabaseMapOutput) ToDatabaseMapOutputWithContext

func (o DatabaseMapOutput) ToDatabaseMapOutputWithContext(ctx context.Context) DatabaseMapOutput

type DatabaseOutput

type DatabaseOutput struct{ *pulumi.OutputState }

func (DatabaseOutput) Description

func (o DatabaseOutput) Description() pulumi.StringPtrOutput

Specifies the description of the database. Changing this parameter will create a new database resource.

func (DatabaseOutput) ElementType

func (DatabaseOutput) ElementType() reflect.Type

func (DatabaseOutput) EnterpriseProjectId

func (o DatabaseOutput) EnterpriseProjectId() pulumi.StringOutput

Specifies the enterprise project ID. The value 0 indicates the default enterprise project. Changing this parameter will create a new database resource.

func (DatabaseOutput) Name

func (o DatabaseOutput) Name() pulumi.StringOutput

Specifies the database name. The name consists of 1 to 128 characters, starting with a letter or digit. Only letters, digits and underscores (_) are allowed and the name cannot be all digits. Changing this parameter will create a new database resource.

func (DatabaseOutput) Owner

func (o DatabaseOutput) Owner() pulumi.StringOutput

Specifies the name of the SQL database owner. The owner must be an IAM user.

func (DatabaseOutput) Region

func (o DatabaseOutput) Region() pulumi.StringOutput

Specifies the region in which to create the DLI database resource. If omitted, the provider-level region will be used. Changing this parameter will create a new database resource.

func (DatabaseOutput) ToDatabaseOutput

func (o DatabaseOutput) ToDatabaseOutput() DatabaseOutput

func (DatabaseOutput) ToDatabaseOutputWithContext

func (o DatabaseOutput) ToDatabaseOutputWithContext(ctx context.Context) DatabaseOutput

type DatabaseState

type DatabaseState struct {
	// Specifies the description of the database.
	// Changing this parameter will create a new database resource.
	Description pulumi.StringPtrInput
	// Specifies the enterprise project ID.
	// The value 0 indicates the default enterprise project. Changing this parameter will create a new database resource.
	EnterpriseProjectId pulumi.StringPtrInput
	// Specifies the database name. The name consists of 1 to 128 characters, starting
	// with a letter or digit. Only letters, digits and underscores (_) are allowed and the name cannot be all digits.
	// Changing this parameter will create a new database resource.
	Name pulumi.StringPtrInput
	// Specifies the name of the SQL database owner.
	// The owner must be an IAM user.
	Owner pulumi.StringPtrInput
	// Specifies the region in which to create the DLI database resource.
	// If omitted, the provider-level region will be used. Changing this parameter will create a new database resource.
	Region pulumi.StringPtrInput
}

func (DatabaseState) ElementType

func (DatabaseState) ElementType() reflect.Type

type FlinkjarJob

type FlinkjarJob struct {
	pulumi.CustomResourceState

	// Specifies storage address of the checkpoint in the JAR file of the user.
	// The path must be unique.
	CheckpointPath pulumi.StringPtrOutput `pulumi:"checkpointPath"`
	// Specifies number of CUs selected for a job. The default value is `2`.
	CuNum pulumi.IntPtrOutput `pulumi:"cuNum"`
	// Specifies dependency files. It is the name of the package that has been
	// uploaded to the DLI.
	DependencyFiles pulumi.StringArrayOutput `pulumi:"dependencyFiles"`
	// Specifies other dependency jars. It is the name of the package that
	// has been uploaded to the DLI.
	DependencyJars pulumi.StringArrayOutput `pulumi:"dependencyJars"`
	// Specifies job description. Length range: 1 to 512 characters.
	Description pulumi.StringPtrOutput `pulumi:"description"`
	// Specifies the JAR file where the job main class is located. It is the name of the
	// package that has been uploaded to the DLI.
	Entrypoint pulumi.StringPtrOutput `pulumi:"entrypoint"`
	// Specifies job entry arguments. Multiple arguments are separated by spaces.
	// The arguments are keys followed by values. Keys have to start with '-' or '--'.
	EntrypointArgs pulumi.StringPtrOutput `pulumi:"entrypointArgs"`
	// Specifies job feature. Type of the Flink image used by a job.
	// + **basic**: indicates that the basic Flink image provided by DLI is used.
	// + **custom**: indicates that the user-defined Flink image is used.
	Feature pulumi.StringOutput `pulumi:"feature"`
	// Specifies flink version. This parameter is valid only when feature is set
	// to basic. You can use this parameter with the feature parameter to specify the version of the DLI basic Flink image
	// used for job running. The options are as follows: `1.10` and `1.7`.
	FlinkVersion pulumi.StringOutput `pulumi:"flinkVersion"`
	// Specifies custom image. The format is Organization name/Image name:Image version.
	// This parameter is valid only when feature is set to `custom`. You can use this parameter with the feature parameter
	// to specify a user-defined Flink image for job running. For details about how to use custom images, see the
	// Data Lake Insight User Guide <https://support.huaweicloud.com/en-us/usermanual-dli/dli_01_0494.html>.
	Image pulumi.StringPtrOutput `pulumi:"image"`
	// Specifies whether to enable the function of uploading job logs to users' OBS buckets.
	// The default value is `false`.
	LogEnabled pulumi.BoolPtrOutput `pulumi:"logEnabled"`
	// Specifies job entry class. Default main class is specified by the Manifest file
	// of the application.
	MainClass pulumi.StringPtrOutput `pulumi:"mainClass"`
	// Specifies number of CUs in the JobManager selected for a job.
	// The default value is `1`.
	ManagerCuNum pulumi.IntPtrOutput `pulumi:"managerCuNum"`
	// Specifies the name of the job. Length range: 1 to 57 characters.
	// The name may consist of letters, digits, underscores (_) and hyphens (-).
	Name pulumi.StringOutput `pulumi:"name"`
	// Specifies OBS path. OBS path where users are authorized to save the log.
	// This parameter is valid only when `logEnabled` is set to `true`.
	ObsBucket pulumi.StringPtrOutput `pulumi:"obsBucket"`
	// Specifies the number of parallel tasks for a job. The default value is `1`.
	ParallelNum pulumi.IntPtrOutput `pulumi:"parallelNum"`
	// Specifies the name of the DLI queue in which this job runs. The queue type
	// must be `general`.
	QueueName pulumi.StringOutput `pulumi:"queueName"`
	// The region in which to create the DLI flink job resource. If omitted, the
	// provider-level region will be used. Changing this parameter will create a new resource.
	Region pulumi.StringOutput `pulumi:"region"`
	// Specifies whether to enable the function of restart upon exceptions.
	// The default value is `false`.
	RestartWhenException pulumi.BoolPtrOutput `pulumi:"restartWhenException"`
	// Specifies whether the abnormal restart is recovered from the checkpoint.
	ResumeCheckpoint pulumi.BoolPtrOutput `pulumi:"resumeCheckpoint"`
	// Specifies maximum number of retry times upon exceptions. The unit is
	// `times/hour`. Value range: `-1` or greater than `0`. The default value is `-1`, indicating that the number of times is
	// unlimited.
	ResumeMaxNum pulumi.IntPtrOutput `pulumi:"resumeMaxNum"`
	// Specifies custom optimization parameters used while the Flink job is running.
	RuntimeConfig pulumi.StringMapOutput `pulumi:"runtimeConfig"`
	// Specifies SMN topic. If a job fails, the system will send a message to users
	// subscribed to the SMN topic.
	SmnTopic pulumi.StringPtrOutput `pulumi:"smnTopic"`
	// The Job status.
	Status pulumi.StringOutput `pulumi:"status"`
	// Specifies the key/value pairs to associate with the resource.
	// Changing this parameter will create a new resource.
	Tags pulumi.StringMapOutput `pulumi:"tags"`
	// Specifies number of CUs for each TaskManager. The default value is `1`.
	TmCuNum pulumi.IntPtrOutput `pulumi:"tmCuNum"`
	// Specifies number of slots in each TaskManager.
	// The default value is `(parallel_num * tm_cu_num) / (cu_num - manager_cu_num)`.
	TmSlotNum pulumi.IntOutput `pulumi:"tmSlotNum"`
}

Manages a Flink job resource of type `Flink Jar` within HuaweiCloud DLI.

## Example Usage

### Create a flink job

```go
package main

import (
	"fmt"

	"github.com/huaweicloud/pulumi-huaweicloud/sdk/go/huaweicloud/Dli"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		cfg := config.New(ctx, "")
		name := cfg.Require("name")
		queueName := cfg.Require("queueName")
		jarObsPath := cfg.Require("jarObsPath")
		entrypointArgs := cfg.Require("entrypointArgs")
		// Upload the JAR package to the DLI resource management system first.
		testPackage, err := Dli.NewPackage(ctx, "testPackage", &Dli.PackageArgs{
			GroupName:  pulumi.String("jarPackage"),
			Type:       pulumi.String("jar"),
			ObjectPath: pulumi.String(jarObsPath),
		})
		if err != nil {
			return err
		}
		// Reference the uploaded package as the job entrypoint ("group/object").
		_, err = Dli.NewFlinkjarJob(ctx, "testFlinkjarJob", &Dli.FlinkjarJobArgs{
			Name:      pulumi.String(name),
			QueueName: pulumi.String(queueName),
			Entrypoint: pulumi.All(testPackage.GroupName, testPackage.ObjectName).ApplyT(func(_args []interface{}) (string, error) {
				groupName := _args[0].(string)
				objectName := _args[1].(string)
				return fmt.Sprintf("%v/%v", groupName, objectName), nil
			}).(pulumi.StringOutput),
			EntrypointArgs: pulumi.String(entrypointArgs),
			Tags: pulumi.StringMap{
				"foo": pulumi.String("bar"),
				"key": pulumi.String("value"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## Import

The job can be imported by `id`. For example,

```sh

$ pulumi import huaweicloud:Dli/flinkjarJob:FlinkjarJob test 12345

```

func GetFlinkjarJob

func GetFlinkjarJob(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *FlinkjarJobState, opts ...pulumi.ResourceOption) (*FlinkjarJob, error)

GetFlinkjarJob gets an existing FlinkjarJob resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewFlinkjarJob

func NewFlinkjarJob(ctx *pulumi.Context,
	name string, args *FlinkjarJobArgs, opts ...pulumi.ResourceOption) (*FlinkjarJob, error)

NewFlinkjarJob registers a new resource with the given unique name, arguments, and options.

func (*FlinkjarJob) ElementType

func (*FlinkjarJob) ElementType() reflect.Type

func (*FlinkjarJob) ToFlinkjarJobOutput

func (i *FlinkjarJob) ToFlinkjarJobOutput() FlinkjarJobOutput

func (*FlinkjarJob) ToFlinkjarJobOutputWithContext

func (i *FlinkjarJob) ToFlinkjarJobOutputWithContext(ctx context.Context) FlinkjarJobOutput

type FlinkjarJobArgs

type FlinkjarJobArgs struct {
	// Specifies storage address of the checkpoint in the JAR file of the user.
	// The path must be unique.
	CheckpointPath pulumi.StringPtrInput
	// Specifies number of CUs selected for a job. The default value is `2`.
	CuNum pulumi.IntPtrInput
	// Specifies dependency files. It is the name of the package that has been
	// uploaded to the DLI.
	DependencyFiles pulumi.StringArrayInput
	// Specifies other dependency jars. It is the name of the package that
	// has been uploaded to the DLI.
	DependencyJars pulumi.StringArrayInput
	// Specifies job description. Length range: 1 to 512 characters.
	Description pulumi.StringPtrInput
	// Specifies the JAR file where the job main class is located. It is the name of the
	// package that has been uploaded to the DLI.
	Entrypoint pulumi.StringPtrInput
	// Specifies job entry arguments. Multiple arguments are separated by spaces.
	// The arguments are keys followed by values. Keys have to start with '-' or '--'.
	EntrypointArgs pulumi.StringPtrInput
	// Specifies job feature. Type of the Flink image used by a job.
	// + **basic**: indicates that the basic Flink image provided by DLI is used.
	// + **custom**: indicates that the user-defined Flink image is used.
	Feature pulumi.StringPtrInput
	// Specifies flink version. This parameter is valid only when feature is set
	// to basic. You can use this parameter with the feature parameter to specify the version of the DLI basic Flink image
	// used for job running. The options are as follows: `1.10` and `1.7`.
	FlinkVersion pulumi.StringPtrInput
	// Specifies custom image. The format is Organization name/Image name:Image version.
	// This parameter is valid only when feature is set to `custom`. You can use this parameter with the feature parameter
	// to specify a user-defined Flink image for job running. For details about how to use custom images, see the
	// Data Lake Insight User Guide <https://support.huaweicloud.com/en-us/usermanual-dli/dli_01_0494.html>.
	Image pulumi.StringPtrInput
	// Specifies whether to enable the function of uploading job logs to users' OBS buckets.
	// The default value is `false`.
	LogEnabled pulumi.BoolPtrInput
	// Specifies job entry class. Default main class is specified by the Manifest file
	// of the application.
	MainClass pulumi.StringPtrInput
	// Specifies number of CUs in the JobManager selected for a job.
	// The default value is `1`.
	ManagerCuNum pulumi.IntPtrInput
	// Specifies the name of the job. Length range: 1 to 57 characters.
	// The name may consist of letters, digits, underscores (_) and hyphens (-).
	Name pulumi.StringPtrInput
	// Specifies OBS path. OBS path where users are authorized to save the log.
	// This parameter is valid only when `logEnabled` is set to `true`.
	ObsBucket pulumi.StringPtrInput
	// Specifies the number of parallel tasks for a job. The default value is `1`.
	ParallelNum pulumi.IntPtrInput
	// Specifies the name of the DLI queue in which this job runs. The queue type
	// must be `general`.
	QueueName pulumi.StringPtrInput
	// The region in which to create the DLI flink job resource. If omitted, the
	// provider-level region will be used. Changing this parameter will create a new resource.
	Region pulumi.StringPtrInput
	// Specifies whether to enable the function of restart upon exceptions.
	// The default value is `false`.
	RestartWhenException pulumi.BoolPtrInput
	// Specifies whether the abnormal restart is recovered from the checkpoint.
	ResumeCheckpoint pulumi.BoolPtrInput
	// Specifies maximum number of retry times upon exceptions. The unit is
	// `times/hour`. Value range: `-1` or greater than `0`. The default value is `-1`, indicating that the number of times is
	// unlimited.
	ResumeMaxNum pulumi.IntPtrInput
	// Specifies custom optimization parameters used while the Flink job is running.
	RuntimeConfig pulumi.StringMapInput
	// Specifies SMN topic. If a job fails, the system will send a message to users
	// subscribed to the SMN topic.
	SmnTopic pulumi.StringPtrInput
	// Specifies the key/value pairs to associate with the resource.
	// Changing this parameter will create a new resource.
	Tags pulumi.StringMapInput
	// Specifies number of CUs for each TaskManager. The default value is `1`.
	TmCuNum pulumi.IntPtrInput
	// Specifies number of slots in each TaskManager.
	// The default value is `(parallel_num * tm_cu_num) / (cu_num - manager_cu_num)`.
	TmSlotNum pulumi.IntPtrInput
}

The set of arguments for constructing a FlinkjarJob resource.

func (FlinkjarJobArgs) ElementType

func (FlinkjarJobArgs) ElementType() reflect.Type

type FlinkjarJobArray

type FlinkjarJobArray []FlinkjarJobInput

func (FlinkjarJobArray) ElementType

func (FlinkjarJobArray) ElementType() reflect.Type

func (FlinkjarJobArray) ToFlinkjarJobArrayOutput

func (i FlinkjarJobArray) ToFlinkjarJobArrayOutput() FlinkjarJobArrayOutput

func (FlinkjarJobArray) ToFlinkjarJobArrayOutputWithContext

func (i FlinkjarJobArray) ToFlinkjarJobArrayOutputWithContext(ctx context.Context) FlinkjarJobArrayOutput

type FlinkjarJobArrayInput

type FlinkjarJobArrayInput interface {
	pulumi.Input

	ToFlinkjarJobArrayOutput() FlinkjarJobArrayOutput
	ToFlinkjarJobArrayOutputWithContext(context.Context) FlinkjarJobArrayOutput
}

FlinkjarJobArrayInput is an input type that accepts FlinkjarJobArray and FlinkjarJobArrayOutput values. You can construct a concrete instance of `FlinkjarJobArrayInput` via:

FlinkjarJobArray{ FlinkjarJobArgs{...} }

type FlinkjarJobArrayOutput

type FlinkjarJobArrayOutput struct{ *pulumi.OutputState }

func (FlinkjarJobArrayOutput) ElementType

func (FlinkjarJobArrayOutput) ElementType() reflect.Type

func (FlinkjarJobArrayOutput) Index

func (o FlinkjarJobArrayOutput) Index(i pulumi.IntInput) FlinkjarJobOutput

func (FlinkjarJobArrayOutput) ToFlinkjarJobArrayOutput

func (o FlinkjarJobArrayOutput) ToFlinkjarJobArrayOutput() FlinkjarJobArrayOutput

func (FlinkjarJobArrayOutput) ToFlinkjarJobArrayOutputWithContext

func (o FlinkjarJobArrayOutput) ToFlinkjarJobArrayOutputWithContext(ctx context.Context) FlinkjarJobArrayOutput

type FlinkjarJobInput

type FlinkjarJobInput interface {
	pulumi.Input

	ToFlinkjarJobOutput() FlinkjarJobOutput
	ToFlinkjarJobOutputWithContext(ctx context.Context) FlinkjarJobOutput
}

type FlinkjarJobMap

type FlinkjarJobMap map[string]FlinkjarJobInput

func (FlinkjarJobMap) ElementType

func (FlinkjarJobMap) ElementType() reflect.Type

func (FlinkjarJobMap) ToFlinkjarJobMapOutput

func (i FlinkjarJobMap) ToFlinkjarJobMapOutput() FlinkjarJobMapOutput

func (FlinkjarJobMap) ToFlinkjarJobMapOutputWithContext

func (i FlinkjarJobMap) ToFlinkjarJobMapOutputWithContext(ctx context.Context) FlinkjarJobMapOutput

type FlinkjarJobMapInput

type FlinkjarJobMapInput interface {
	pulumi.Input

	ToFlinkjarJobMapOutput() FlinkjarJobMapOutput
	ToFlinkjarJobMapOutputWithContext(context.Context) FlinkjarJobMapOutput
}

FlinkjarJobMapInput is an input type that accepts FlinkjarJobMap and FlinkjarJobMapOutput values. You can construct a concrete instance of `FlinkjarJobMapInput` via:

FlinkjarJobMap{ "key": FlinkjarJobArgs{...} }

type FlinkjarJobMapOutput

type FlinkjarJobMapOutput struct{ *pulumi.OutputState }

func (FlinkjarJobMapOutput) ElementType

func (FlinkjarJobMapOutput) ElementType() reflect.Type

func (FlinkjarJobMapOutput) MapIndex

func (o FlinkjarJobMapOutput) MapIndex(k pulumi.StringInput) FlinkjarJobOutput

func (FlinkjarJobMapOutput) ToFlinkjarJobMapOutput

func (o FlinkjarJobMapOutput) ToFlinkjarJobMapOutput() FlinkjarJobMapOutput

func (FlinkjarJobMapOutput) ToFlinkjarJobMapOutputWithContext

func (o FlinkjarJobMapOutput) ToFlinkjarJobMapOutputWithContext(ctx context.Context) FlinkjarJobMapOutput

type FlinkjarJobOutput

type FlinkjarJobOutput struct{ *pulumi.OutputState }

func (FlinkjarJobOutput) CheckpointPath

func (o FlinkjarJobOutput) CheckpointPath() pulumi.StringPtrOutput

Specifies storage address of the checkpoint in the JAR file of the user. The path must be unique.

func (FlinkjarJobOutput) CuNum

func (o FlinkjarJobOutput) CuNum() pulumi.IntPtrOutput

Specifies number of CUs selected for a job. The default value is `2`.

func (FlinkjarJobOutput) DependencyFiles

func (o FlinkjarJobOutput) DependencyFiles() pulumi.StringArrayOutput

Specifies dependency files. It is the name of the package that has been uploaded to the DLI.

func (FlinkjarJobOutput) DependencyJars

func (o FlinkjarJobOutput) DependencyJars() pulumi.StringArrayOutput

Specifies other dependency jars. It is the name of the package that has been uploaded to the DLI.

func (FlinkjarJobOutput) Description

func (o FlinkjarJobOutput) Description() pulumi.StringPtrOutput

Specifies job description. Length range: 1 to 512 characters.

func (FlinkjarJobOutput) ElementType

func (FlinkjarJobOutput) ElementType() reflect.Type

func (FlinkjarJobOutput) Entrypoint

func (o FlinkjarJobOutput) Entrypoint() pulumi.StringPtrOutput

Specifies the JAR file where the job main class is located. It is the name of the package that has been uploaded to the DLI.

func (FlinkjarJobOutput) EntrypointArgs

func (o FlinkjarJobOutput) EntrypointArgs() pulumi.StringPtrOutput

Specifies job entry arguments. Multiple arguments are separated by spaces. The arguments are keys followed by values. Keys have to start with '-' or '--'.

func (FlinkjarJobOutput) Feature

func (o FlinkjarJobOutput) Feature() pulumi.StringOutput

Specifies job feature. Type of the Flink image used by a job. + **basic**: indicates that the basic Flink image provided by DLI is used. + **custom**: indicates that the user-defined Flink image is used.

func (FlinkjarJobOutput) FlinkVersion

func (o FlinkjarJobOutput) FlinkVersion() pulumi.StringOutput

Specifies flink version. This parameter is valid only when feature is set to basic. You can use this parameter with the feature parameter to specify the version of the DLI basic Flink image used for job running. The options are as follows: `1.10` and `1.7`.

func (FlinkjarJobOutput) Image

func (o FlinkjarJobOutput) Image() pulumi.StringPtrOutput

Specifies custom image. The format is Organization name/Image name:Image version. This parameter is valid only when feature is set to `custom`. You can use this parameter with the feature parameter to specify a user-defined Flink image for job running. For details about how to use custom images, see the Data Lake Insight User Guide <https://support.huaweicloud.com/en-us/usermanual-dli/dli_01_0494.html>.
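
To illustrate how `feature`, `flinkVersion` and `image` work together, here is a hedged sketch of the two image modes; the queue name, entrypoint and the `myOrg/myFlink:1.0` image are placeholders, not real values.

```go
package main

import (
	"github.com/huaweicloud/pulumi-huaweicloud/sdk/go/huaweicloud/Dli"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Basic image: pin the version of the DLI-provided Flink image.
		_, err := Dli.NewFlinkjarJob(ctx, "basicImageJob", &Dli.FlinkjarJobArgs{
			QueueName:    pulumi.String("my-queue"),
			Entrypoint:   pulumi.String("jarPackage/my-job.jar"),
			Feature:      pulumi.String("basic"),
			FlinkVersion: pulumi.String("1.10"),
		})
		if err != nil {
			return err
		}
		// Custom image: reference a user-defined Flink image instead.
		_, err = Dli.NewFlinkjarJob(ctx, "customImageJob", &Dli.FlinkjarJobArgs{
			QueueName:  pulumi.String("my-queue"),
			Entrypoint: pulumi.String("jarPackage/my-job.jar"),
			Feature:    pulumi.String("custom"),
			Image:      pulumi.String("myOrg/myFlink:1.0"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
```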

func (FlinkjarJobOutput) LogEnabled

func (o FlinkjarJobOutput) LogEnabled() pulumi.BoolPtrOutput

Specifies whether to enable the function of uploading job logs to users' OBS buckets. The default value is `false`.
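
A hedged sketch of enabling log upload: when `logEnabled` is true, `obsBucket` names the OBS bucket the logs are written to. The queue, entrypoint and bucket names are placeholders.

```go
package main

import (
	"github.com/huaweicloud/pulumi-huaweicloud/sdk/go/huaweicloud/Dli"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Upload job logs to a user-owned OBS bucket; obsBucket is only used
		// because logEnabled is set to true.
		_, err := Dli.NewFlinkjarJob(ctx, "loggedJob", &Dli.FlinkjarJobArgs{
			QueueName:  pulumi.String("my-queue"),
			Entrypoint: pulumi.String("jarPackage/my-job.jar"),
			LogEnabled: pulumi.Bool(true),
			ObsBucket:  pulumi.String("my-log-bucket"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
```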

func (FlinkjarJobOutput) MainClass

func (o FlinkjarJobOutput) MainClass() pulumi.StringPtrOutput

Specifies job entry class. Default main class is specified by the Manifest file of the application.

func (FlinkjarJobOutput) ManagerCuNum

func (o FlinkjarJobOutput) ManagerCuNum() pulumi.IntPtrOutput

Specifies number of CUs in the JobManager selected for a job. The default value is `1`.

func (FlinkjarJobOutput) Name

func (o FlinkjarJobOutput) Name() pulumi.StringOutput

Specifies the name of the job. Length range: 1 to 57 characters. The name may consist of letters, digits, underscores (_) and hyphens (-).

func (FlinkjarJobOutput) ObsBucket

func (o FlinkjarJobOutput) ObsBucket() pulumi.StringPtrOutput

Specifies OBS path. OBS path where users are authorized to save the log. This parameter is valid only when `logEnabled` is set to `true`.

func (FlinkjarJobOutput) ParallelNum

func (o FlinkjarJobOutput) ParallelNum() pulumi.IntPtrOutput

Specifies the number of parallel tasks for a job. The default value is `1`.

func (FlinkjarJobOutput) QueueName

func (o FlinkjarJobOutput) QueueName() pulumi.StringOutput

Specifies the name of the DLI queue in which this job runs. The queue type must be `general`.

func (FlinkjarJobOutput) Region

func (o FlinkjarJobOutput) Region() pulumi.StringOutput

The region in which to create the DLI flink job resource. If omitted, the provider-level region will be used. Changing this parameter will create a new resource.

func (FlinkjarJobOutput) RestartWhenException

func (o FlinkjarJobOutput) RestartWhenException() pulumi.BoolPtrOutput

Specifies whether to enable the function of restart upon exceptions. The default value is `false`.

func (FlinkjarJobOutput) ResumeCheckpoint

func (o FlinkjarJobOutput) ResumeCheckpoint() pulumi.BoolPtrOutput

Specifies whether the abnormal restart is recovered from the checkpoint.

func (FlinkjarJobOutput) ResumeMaxNum

func (o FlinkjarJobOutput) ResumeMaxNum() pulumi.IntPtrOutput

Specifies maximum number of retry times upon exceptions. The unit is `times/hour`. Value range: `-1` or greater than `0`. The default value is `-1`, indicating that the number of times is unlimited.
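
A hedged sketch combining the exception-handling options: restart on exceptions, resume from the checkpoint, and cap retries at 10 per hour instead of the unlimited default. The queue, entrypoint and checkpoint path are placeholders.

```go
package main

import (
	"github.com/huaweicloud/pulumi-huaweicloud/sdk/go/huaweicloud/Dli"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Restart automatically on exceptions and recover from the checkpoint,
		// retrying at most 10 times per hour.
		_, err := Dli.NewFlinkjarJob(ctx, "resilientJob", &Dli.FlinkjarJobArgs{
			QueueName:            pulumi.String("my-queue"),
			Entrypoint:           pulumi.String("jarPackage/my-job.jar"),
			CheckpointPath:       pulumi.String("obs://my-bucket/checkpoints/resilient-job"),
			RestartWhenException: pulumi.Bool(true),
			ResumeCheckpoint:     pulumi.Bool(true),
			ResumeMaxNum:         pulumi.Int(10),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
```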

func (FlinkjarJobOutput) RuntimeConfig

func (o FlinkjarJobOutput) RuntimeConfig() pulumi.StringMapOutput

Specifies custom optimization parameters used while the Flink job is running.

func (FlinkjarJobOutput) SmnTopic

func (o FlinkjarJobOutput) SmnTopic() pulumi.StringPtrOutput

Specifies SMN topic. If a job fails, the system will send a message to users subscribed to the SMN topic.

func (FlinkjarJobOutput) Status

func (o FlinkjarJobOutput) Status() pulumi.StringOutput

The Job status.

func (FlinkjarJobOutput) Tags

func (o FlinkjarJobOutput) Tags() pulumi.StringMapOutput

Specifies the key/value pairs to associate with the resource. Changing this parameter will create a new resource.

func (FlinkjarJobOutput) TmCuNum

func (o FlinkjarJobOutput) TmCuNum() pulumi.IntPtrOutput

Specifies number of CUs for each TaskManager. The default value is `1`.

func (FlinkjarJobOutput) TmSlotNum

func (o FlinkjarJobOutput) TmSlotNum() pulumi.IntOutput

Specifies number of slots in each TaskManager. The default value is `(parallel_num * tm_cu_num) / (cu_num - manager_cu_num)`.

func (FlinkjarJobOutput) ToFlinkjarJobOutput

func (o FlinkjarJobOutput) ToFlinkjarJobOutput() FlinkjarJobOutput

func (FlinkjarJobOutput) ToFlinkjarJobOutputWithContext

func (o FlinkjarJobOutput) ToFlinkjarJobOutputWithContext(ctx context.Context) FlinkjarJobOutput

type FlinkjarJobState

type FlinkjarJobState struct {
	// Specifies storage address of the checkpoint in the JAR file of the user.
	// The path must be unique.
	CheckpointPath pulumi.StringPtrInput
	// Specifies number of CUs selected for a job. The default value is `2`.
	CuNum pulumi.IntPtrInput
	// Specifies dependency files. It is the name of the package that has been
	// uploaded to the DLI.
	DependencyFiles pulumi.StringArrayInput
	// Specifies other dependency jars. It is the name of the package that
	// has been uploaded to the DLI.
	DependencyJars pulumi.StringArrayInput
	// Specifies job description. Length range: 1 to 512 characters.
	Description pulumi.StringPtrInput
	// Specifies the JAR file where the job main class is located. It is the name of the
	// package that has been uploaded to the DLI.
	Entrypoint pulumi.StringPtrInput
	// Specifies job entry arguments. Multiple arguments are separated by spaces.
	// The arguments are keys followed by values. Keys have to start with '-' or '--'.
	EntrypointArgs pulumi.StringPtrInput
	// Specifies job feature. Type of the Flink image used by a job.
	// + **basic**: indicates that the basic Flink image provided by DLI is used.
	// + **custom**: indicates that the user-defined Flink image is used.
	Feature pulumi.StringPtrInput
	// Specifies flink version. This parameter is valid only when feature is set
	// to basic. You can use this parameter with the feature parameter to specify the version of the DLI basic Flink image
	// used for job running. The options are as follows: `1.10` and `1.7`.
	FlinkVersion pulumi.StringPtrInput
	// Specifies custom image. The format is Organization name/Image name:Image version.
	// This parameter is valid only when feature is set to `custom`. You can use this parameter with the feature parameter
	// to specify a user-defined Flink image for job running. For details about how to use custom images, see the
	// Data Lake Insight User Guide <https://support.huaweicloud.com/en-us/usermanual-dli/dli_01_0494.html>.
	Image pulumi.StringPtrInput
	// Specifies whether to enable the function of uploading job logs to users' OBS buckets.
	// The default value is `false`.
	LogEnabled pulumi.BoolPtrInput
	// Specifies job entry class. Default main class is specified by the Manifest file
	// of the application.
	MainClass pulumi.StringPtrInput
	// Specifies number of CUs in the JobManager selected for a job.
	// The default value is `1`.
	ManagerCuNum pulumi.IntPtrInput
	// Specifies the name of the job. Length range: 1 to 57 characters.
	// The name may consist of letters, digits, underscores (_) and hyphens (-).
	Name pulumi.StringPtrInput
	// Specifies OBS path. OBS path where users are authorized to save the log.
	// This parameter is valid only when `logEnabled` is set to `true`.
	ObsBucket pulumi.StringPtrInput
	// Specifies the number of parallel tasks for a job. The default value is `1`.
	ParallelNum pulumi.IntPtrInput
	// Specifies the name of the DLI queue in which this job runs. The queue type
	// must be `general`.
	QueueName pulumi.StringPtrInput
	// The region in which to create the DLI flink job resource. If omitted, the
	// provider-level region will be used. Changing this parameter will create a new resource.
	Region pulumi.StringPtrInput
	// Specifies whether to enable the function of restart upon exceptions.
	// The default value is `false`.
	RestartWhenException pulumi.BoolPtrInput
	// Specifies whether the abnormal restart is recovered from the checkpoint.
	ResumeCheckpoint pulumi.BoolPtrInput
	// Specifies maximum number of retry times upon exceptions. The unit is
	// `times/hour`. Value range: `-1` or greater than `0`. The default value is `-1`, indicating that the number of times is
	// unlimited.
	ResumeMaxNum pulumi.IntPtrInput
	// Specifies custom optimization parameters used while the Flink job is running.
	RuntimeConfig pulumi.StringMapInput
	// Specifies SMN topic. If a job fails, the system will send a message to users
	// subscribed to the SMN topic.
	SmnTopic pulumi.StringPtrInput
	// The Job status.
	Status pulumi.StringPtrInput
	// Specifies the key/value pairs to associate with the resource.
	// Changing this parameter will create a new resource.
	Tags pulumi.StringMapInput
	// Specifies number of CUs for each TaskManager. The default value is `1`.
	TmCuNum pulumi.IntPtrInput
	// Specifies number of slots in each TaskManager.
	// The default value is `(parallel_num * tm_cu_num) / (cu_num - manager_cu_num)`.
	TmSlotNum pulumi.IntPtrInput
}

func (FlinkjarJobState) ElementType

func (FlinkjarJobState) ElementType() reflect.Type

type FlinksqlJob

type FlinksqlJob struct {
	pulumi.CustomResourceState

	// Specifies whether to enable the automatic job snapshot function.
	// + **true**: indicates to enable the automatic job snapshot function.
	// + **false**: indicates to disable the automatic job snapshot function.
	CheckpointEnabled pulumi.BoolPtrOutput `pulumi:"checkpointEnabled"`
	// Specifies snapshot interval. The unit is second.
	// The default value is 10.
	CheckpointInterval pulumi.IntPtrOutput `pulumi:"checkpointInterval"`
	// Specifies snapshot mode. There are two options:
	// + **exactly_once**: indicates that data is processed only once.
	// + **at_least_once**: indicates that data is processed at least once.
	CheckpointMode pulumi.StringPtrOutput `pulumi:"checkpointMode"`
	// Specifies number of CUs selected for a job. The default value is 2.
	CuNumber pulumi.IntPtrOutput `pulumi:"cuNumber"`
	// Specifies job description. Length range: 1 to 512 characters.
	Description pulumi.StringPtrOutput `pulumi:"description"`
	// Specifies dirty data policy of a job.
	// + **2:obsDir**: Save the dirty data to the obs path `obsDir`. For example: `2:yourBucket/output_path`
	// + **1**: Trigger a job exception
	// + **0**: Ignore
	DirtyDataStrategy pulumi.StringPtrOutput `pulumi:"dirtyDataStrategy"`
	// Specifies edge computing group IDs.
	EdgeGroupIds pulumi.StringArrayOutput `pulumi:"edgeGroupIds"`
	// Specifies retention time of the idle state. The unit is hour.
	// The default value is 1.
	IdleStateRetention pulumi.IntPtrOutput `pulumi:"idleStateRetention"`
	// Specifies whether to enable the function of uploading job logs to
	// users' OBS buckets. The default value is false.
	LogEnabled pulumi.BoolPtrOutput `pulumi:"logEnabled"`
	// Specifies number of CUs in the JobManager selected for a job.
	// The default value is 1.
	ManagerCuNumber pulumi.IntPtrOutput `pulumi:"managerCuNumber"`
	// Specifies the name of the job. Length range: 1 to 57 characters,
	// which may consist of letters, digits, underscores (_) and hyphens (-).
	Name pulumi.StringOutput `pulumi:"name"`
	// Specifies the OBS path where users are authorized to save snapshots (valid only
	// when `checkpointEnabled` is set to `true`) and job logs (valid only when `logEnabled` is set to `true`).
	ObsBucket pulumi.StringPtrOutput `pulumi:"obsBucket"`
	// Specifies the number of parallel tasks for a job. The default value is 1.
	ParallelNumber pulumi.IntPtrOutput `pulumi:"parallelNumber"`
	// Specifies name of a queue.
	QueueName pulumi.StringOutput `pulumi:"queueName"`
	// The region in which to create the DLI flink job resource. If omitted, the
	// provider-level region will be used. Changing this parameter will create a new resource.
	Region pulumi.StringOutput `pulumi:"region"`
	// Specifies whether to enable the function of automatically
	// restarting a job upon job exceptions. The default value is false.
	RestartWhenException pulumi.BoolPtrOutput `pulumi:"restartWhenException"`
	// Specifies whether the abnormal restart is recovered from the
	// checkpoint.
	ResumeCheckpoint pulumi.BoolPtrOutput `pulumi:"resumeCheckpoint"`
	// Specifies maximum number of retry times upon exceptions. The unit is
	// `times/hour`. Value range: `-1` or greater than `0`. The default value is `-1`, indicating that the number of times is
	// unlimited.
	ResumeMaxNum pulumi.IntPtrOutput `pulumi:"resumeMaxNum"`
	// Specifies job running mode. The options are as follows:
	RunMode pulumi.StringPtrOutput `pulumi:"runMode"`
	// Specifies custom optimization parameters used while the Flink job is
	// running.
	RuntimeConfig pulumi.StringMapOutput `pulumi:"runtimeConfig"`
	// Specifies SMN topic. If a job fails, the system will send a message to
	// users subscribed to the SMN topic.
	SmnTopic pulumi.StringPtrOutput `pulumi:"smnTopic"`
	// Specifies stream SQL statement, which includes at least the following
	// three parts: source, query, and sink. Length range: 1024x1024 characters.
	Sql pulumi.StringPtrOutput `pulumi:"sql"`
	// The Job status.
	Status pulumi.StringOutput `pulumi:"status"`
	// Specifies the key/value pairs to associate with the resource.
	Tags pulumi.StringMapOutput `pulumi:"tags"`
	// Specifies number of CUs for each Task Manager. The default value is 1.
	TmCus pulumi.IntPtrOutput `pulumi:"tmCus"`
	// Specifies number of slots in each Task Manager.
	// The default value is (**parallel_number** * **tm_cus**)/(**cu_number** - **manager_cu_number**).
	TmSlotNum pulumi.IntOutput `pulumi:"tmSlotNum"`
	// Specifies the type of the job. The valid values are `flinkSqlJob`,
	// `flinkOpensourceSqlJob` and `flinkSqlEdgeJob`. Default value is `flinkSqlJob`.
	// Changing this parameter will create a new resource.
	Type pulumi.StringPtrOutput `pulumi:"type"`
	// Specifies name of the resource package that has been uploaded to the
	// DLI resource management system. The UDF Jar file of the SQL job is specified by this parameter.
	UdfJarUrl pulumi.StringOutput `pulumi:"udfJarUrl"`
}

Manages a Flink SQL job resource within HuaweiCloud DLI.

## Example Usage

### Create a flink job

```go
package main

import (
	"github.com/huaweicloud/pulumi-huaweicloud/sdk/go/huaweicloud/Dli"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		cfg := config.New(ctx, "")
		sql := cfg.Require("sql")
		jobName := cfg.Require("jobName")
		_, err := Dli.NewFlinksqlJob(ctx, "test", &Dli.FlinksqlJobArgs{
			Name: pulumi.String(jobName),
			Type: pulumi.String("flink_sql_job"),
			Sql:  pulumi.String(sql),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## Import

Flink SQL jobs can be imported by their `id`. For example,

```sh

$ pulumi import huaweicloud:Dli/flinksqlJob:FlinksqlJob test 12345

```

func GetFlinksqlJob

func GetFlinksqlJob(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *FlinksqlJobState, opts ...pulumi.ResourceOption) (*FlinksqlJob, error)

GetFlinksqlJob gets an existing FlinksqlJob resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewFlinksqlJob

func NewFlinksqlJob(ctx *pulumi.Context,
	name string, args *FlinksqlJobArgs, opts ...pulumi.ResourceOption) (*FlinksqlJob, error)

NewFlinksqlJob registers a new resource with the given unique name, arguments, and options.

func (*FlinksqlJob) ElementType

func (*FlinksqlJob) ElementType() reflect.Type

func (*FlinksqlJob) ToFlinksqlJobOutput

func (i *FlinksqlJob) ToFlinksqlJobOutput() FlinksqlJobOutput

func (*FlinksqlJob) ToFlinksqlJobOutputWithContext

func (i *FlinksqlJob) ToFlinksqlJobOutputWithContext(ctx context.Context) FlinksqlJobOutput

type FlinksqlJobArgs

type FlinksqlJobArgs struct {
	// Specifies whether to enable the automatic job snapshot function.
	// + **true**: indicates to enable the automatic job snapshot function.
	// + **false**: indicates to disable the automatic job snapshot function.
	CheckpointEnabled pulumi.BoolPtrInput
	// Specifies snapshot interval. The unit is second.
	// The default value is 10.
	CheckpointInterval pulumi.IntPtrInput
	// Specifies snapshot mode. There are two options:
	// + **exactly_once**: indicates that data is processed only once.
	// + **at_least_once**: indicates that data is processed at least once.
	CheckpointMode pulumi.StringPtrInput
	// Specifies number of CUs selected for a job. The default value is 2.
	CuNumber pulumi.IntPtrInput
	// Specifies job description. Length range: 1 to 512 characters.
	Description pulumi.StringPtrInput
	// Specifies dirty data policy of a job.
	// + **2:obsDir**: Save the dirty data to the obs path `obsDir`. For example: `2:yourBucket/output_path`
	// + **1**: Trigger a job exception
	// + **0**: Ignore
	DirtyDataStrategy pulumi.StringPtrInput
	// Specifies edge computing group IDs.
	EdgeGroupIds pulumi.StringArrayInput
	// Specifies retention time of the idle state. The unit is hour.
	// The default value is 1.
	IdleStateRetention pulumi.IntPtrInput
	// Specifies whether to enable the function of uploading job logs to
	// users' OBS buckets. The default value is false.
	LogEnabled pulumi.BoolPtrInput
	// Specifies number of CUs in the JobManager selected for a job.
	// The default value is 1.
	ManagerCuNumber pulumi.IntPtrInput
	// Specifies the name of the job. Length range: 1 to 57 characters,
	// which may consist of letters, digits, underscores (_) and hyphens (-).
	Name pulumi.StringPtrInput
	// Specifies the OBS path where users are authorized to save snapshots (valid only
	// when `checkpointEnabled` is set to `true`) and job logs (valid only when `logEnabled` is set to `true`).
	ObsBucket pulumi.StringPtrInput
	// Specifies the number of parallel tasks for a job. The default value is 1.
	ParallelNumber pulumi.IntPtrInput
	// Specifies name of a queue.
	QueueName pulumi.StringPtrInput
	// The region in which to create the DLI flink job resource. If omitted, the
	// provider-level region will be used. Changing this parameter will create a new resource.
	Region pulumi.StringPtrInput
	// Specifies whether to enable the function of automatically
	// restarting a job upon job exceptions. The default value is false.
	RestartWhenException pulumi.BoolPtrInput
	// Specifies whether the abnormal restart is recovered from the
	// checkpoint.
	ResumeCheckpoint pulumi.BoolPtrInput
	// Specifies maximum number of retry times upon exceptions. The unit is
	// `times/hour`. Value range: `-1` or greater than `0`. The default value is `-1`, indicating that the number of times is
	// unlimited.
	ResumeMaxNum pulumi.IntPtrInput
	// Specifies job running mode. The options are as follows:
	RunMode pulumi.StringPtrInput
	// Specifies custom optimization parameters used while the Flink job is
	// running.
	RuntimeConfig pulumi.StringMapInput
	// Specifies SMN topic. If a job fails, the system will send a message to
	// users subscribed to the SMN topic.
	SmnTopic pulumi.StringPtrInput
	// Specifies stream SQL statement, which includes at least the following
	// three parts: source, query, and sink. Length range: 1024x1024 characters.
	Sql pulumi.StringPtrInput
	// Specifies the key/value pairs to associate with the resource.
	Tags pulumi.StringMapInput
	// Specifies number of CUs for each Task Manager. The default value is 1.
	TmCus pulumi.IntPtrInput
	// Specifies number of slots in each Task Manager.
	// The default value is (**parallel_number** * **tm_cus**)/(**cu_number** - **manager_cu_number**).
	TmSlotNum pulumi.IntPtrInput
	// Specifies the type of the job. The valid values are `flinkSqlJob`,
	// `flinkOpensourceSqlJob` and `flinkSqlEdgeJob`. Default value is `flinkSqlJob`.
	// Changing this parameter will create a new resource.
	Type pulumi.StringPtrInput
	// Specifies name of the resource package that has been uploaded to the
	// DLI resource management system. The UDF Jar file of the SQL job is specified by this parameter.
	UdfJarUrl pulumi.StringPtrInput
}

The set of arguments for constructing a FlinksqlJob resource.

func (FlinksqlJobArgs) ElementType

func (FlinksqlJobArgs) ElementType() reflect.Type

type FlinksqlJobArray

type FlinksqlJobArray []FlinksqlJobInput

func (FlinksqlJobArray) ElementType

func (FlinksqlJobArray) ElementType() reflect.Type

func (FlinksqlJobArray) ToFlinksqlJobArrayOutput

func (i FlinksqlJobArray) ToFlinksqlJobArrayOutput() FlinksqlJobArrayOutput

func (FlinksqlJobArray) ToFlinksqlJobArrayOutputWithContext

func (i FlinksqlJobArray) ToFlinksqlJobArrayOutputWithContext(ctx context.Context) FlinksqlJobArrayOutput

type FlinksqlJobArrayInput

type FlinksqlJobArrayInput interface {
	pulumi.Input

	ToFlinksqlJobArrayOutput() FlinksqlJobArrayOutput
	ToFlinksqlJobArrayOutputWithContext(context.Context) FlinksqlJobArrayOutput
}

FlinksqlJobArrayInput is an input type that accepts FlinksqlJobArray and FlinksqlJobArrayOutput values. You can construct a concrete instance of `FlinksqlJobArrayInput` via:

FlinksqlJobArray{ FlinksqlJobArgs{...} }

type FlinksqlJobArrayOutput

type FlinksqlJobArrayOutput struct{ *pulumi.OutputState }

func (FlinksqlJobArrayOutput) ElementType

func (FlinksqlJobArrayOutput) ElementType() reflect.Type

func (FlinksqlJobArrayOutput) Index

func (o FlinksqlJobArrayOutput) Index(i pulumi.IntInput) FlinksqlJobOutput

func (FlinksqlJobArrayOutput) ToFlinksqlJobArrayOutput

func (o FlinksqlJobArrayOutput) ToFlinksqlJobArrayOutput() FlinksqlJobArrayOutput

func (FlinksqlJobArrayOutput) ToFlinksqlJobArrayOutputWithContext

func (o FlinksqlJobArrayOutput) ToFlinksqlJobArrayOutputWithContext(ctx context.Context) FlinksqlJobArrayOutput

type FlinksqlJobInput

type FlinksqlJobInput interface {
	pulumi.Input

	ToFlinksqlJobOutput() FlinksqlJobOutput
	ToFlinksqlJobOutputWithContext(ctx context.Context) FlinksqlJobOutput
}

type FlinksqlJobMap

type FlinksqlJobMap map[string]FlinksqlJobInput

func (FlinksqlJobMap) ElementType

func (FlinksqlJobMap) ElementType() reflect.Type

func (FlinksqlJobMap) ToFlinksqlJobMapOutput

func (i FlinksqlJobMap) ToFlinksqlJobMapOutput() FlinksqlJobMapOutput

func (FlinksqlJobMap) ToFlinksqlJobMapOutputWithContext

func (i FlinksqlJobMap) ToFlinksqlJobMapOutputWithContext(ctx context.Context) FlinksqlJobMapOutput

type FlinksqlJobMapInput

type FlinksqlJobMapInput interface {
	pulumi.Input

	ToFlinksqlJobMapOutput() FlinksqlJobMapOutput
	ToFlinksqlJobMapOutputWithContext(context.Context) FlinksqlJobMapOutput
}

FlinksqlJobMapInput is an input type that accepts FlinksqlJobMap and FlinksqlJobMapOutput values. You can construct a concrete instance of `FlinksqlJobMapInput` via:

FlinksqlJobMap{ "key": FlinksqlJobArgs{...} }

type FlinksqlJobMapOutput

type FlinksqlJobMapOutput struct{ *pulumi.OutputState }

func (FlinksqlJobMapOutput) ElementType

func (FlinksqlJobMapOutput) ElementType() reflect.Type

func (FlinksqlJobMapOutput) MapIndex

func (o FlinksqlJobMapOutput) MapIndex(k pulumi.StringInput) FlinksqlJobOutput

func (FlinksqlJobMapOutput) ToFlinksqlJobMapOutput

func (o FlinksqlJobMapOutput) ToFlinksqlJobMapOutput() FlinksqlJobMapOutput

func (FlinksqlJobMapOutput) ToFlinksqlJobMapOutputWithContext

func (o FlinksqlJobMapOutput) ToFlinksqlJobMapOutputWithContext(ctx context.Context) FlinksqlJobMapOutput

type FlinksqlJobOutput

type FlinksqlJobOutput struct{ *pulumi.OutputState }

func (FlinksqlJobOutput) CheckpointEnabled

func (o FlinksqlJobOutput) CheckpointEnabled() pulumi.BoolPtrOutput

Specifies whether to enable the automatic job snapshot function. + **true**: indicates to enable the automatic job snapshot function. + **false**: indicates to disable the automatic job snapshot function.

func (FlinksqlJobOutput) CheckpointInterval

func (o FlinksqlJobOutput) CheckpointInterval() pulumi.IntPtrOutput

Specifies snapshot interval. The unit is second. The default value is 10.

func (FlinksqlJobOutput) CheckpointMode

func (o FlinksqlJobOutput) CheckpointMode() pulumi.StringPtrOutput

Specifies snapshot mode. There are two options: + **exactly_once**: indicates that data is processed only once. + **at_least_once**: indicates that data is processed at least once.
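
A hedged sketch of enabling automatic snapshots for a SQL job, checkpointing every 60 seconds in `exactly_once` mode; the queue, bucket and SQL text are placeholders.

```go
package main

import (
	"github.com/huaweicloud/pulumi-huaweicloud/sdk/go/huaweicloud/Dli"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := Dli.NewFlinksqlJob(ctx, "checkpointedJob", &Dli.FlinksqlJobArgs{
			QueueName:          pulumi.String("my-queue"),
			Sql:                pulumi.String("-- source, query and sink statements go here"),
			CheckpointEnabled:  pulumi.Bool(true),
			CheckpointInterval: pulumi.Int(60),
			CheckpointMode:     pulumi.String("exactly_once"),
			// Snapshots are written to this bucket because checkpointEnabled is true.
			ObsBucket: pulumi.String("my-snapshot-bucket"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
```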

func (FlinksqlJobOutput) CuNumber

func (o FlinksqlJobOutput) CuNumber() pulumi.IntPtrOutput

Specifies number of CUs selected for a job. The default value is 2.

func (FlinksqlJobOutput) Description

func (o FlinksqlJobOutput) Description() pulumi.StringPtrOutput

Specifies job description. Length range: 1 to 512 characters.

func (FlinksqlJobOutput) DirtyDataStrategy

func (o FlinksqlJobOutput) DirtyDataStrategy() pulumi.StringPtrOutput

Specifies dirty data policy of a job. + **2:obsDir**: Save the dirty data to the obs path `obsDir`. For example: `2:yourBucket/output_path` + **1**: Trigger a job exception + **0**: Ignore

func (FlinksqlJobOutput) EdgeGroupIds

func (o FlinksqlJobOutput) EdgeGroupIds() pulumi.StringArrayOutput

Specifies edge computing group IDs.

func (FlinksqlJobOutput) ElementType

func (FlinksqlJobOutput) ElementType() reflect.Type

func (FlinksqlJobOutput) IdleStateRetention

func (o FlinksqlJobOutput) IdleStateRetention() pulumi.IntPtrOutput

Specifies retention time of the idle state. The unit is hour. The default value is 1.

func (FlinksqlJobOutput) LogEnabled

func (o FlinksqlJobOutput) LogEnabled() pulumi.BoolPtrOutput

Specifies whether to enable the function of uploading job logs to users' OBS buckets. The default value is false.

func (FlinksqlJobOutput) ManagerCuNumber

func (o FlinksqlJobOutput) ManagerCuNumber() pulumi.IntPtrOutput

Specifies number of CUs in the JobManager selected for a job. The default value is 1.

func (FlinksqlJobOutput) Name

func (o FlinksqlJobOutput) Name() pulumi.StringOutput

Specifies the name of the job. Length range: 1 to 57 characters, which may consist of letters, digits, underscores (_) and hyphens (-).

func (FlinksqlJobOutput) ObsBucket

func (o FlinksqlJobOutput) ObsBucket() pulumi.StringPtrOutput

Specifies the OBS path where users are authorized to save snapshots (valid only when `checkpointEnabled` is set to `true`) and job logs (valid only when `logEnabled` is set to `true`).

func (FlinksqlJobOutput) ParallelNumber

func (o FlinksqlJobOutput) ParallelNumber() pulumi.IntPtrOutput

Specifies the number of parallel tasks for a job. The default value is 1.

func (FlinksqlJobOutput) QueueName

func (o FlinksqlJobOutput) QueueName() pulumi.StringOutput

Specifies name of a queue.

func (FlinksqlJobOutput) Region

func (o FlinksqlJobOutput) Region() pulumi.StringOutput

The region in which to create the DLI flink job resource. If omitted, the provider-level region will be used. Changing this parameter will create a new resource.

func (FlinksqlJobOutput) RestartWhenException

func (o FlinksqlJobOutput) RestartWhenException() pulumi.BoolPtrOutput

Specifies whether to enable the function of automatically restarting a job upon job exceptions. The default value is false.

func (FlinksqlJobOutput) ResumeCheckpoint

func (o FlinksqlJobOutput) ResumeCheckpoint() pulumi.BoolPtrOutput

Specifies whether the abnormal restart is recovered from the checkpoint.

func (FlinksqlJobOutput) ResumeMaxNum

func (o FlinksqlJobOutput) ResumeMaxNum() pulumi.IntPtrOutput

Specifies maximum number of retry times upon exceptions. The unit is `times/hour`. Value range: `-1` or greater than `0`. The default value is `-1`, indicating that the number of times is unlimited.

func (FlinksqlJobOutput) RunMode

func (o FlinksqlJobOutput) RunMode() pulumi.StringPtrOutput

Specifies job running mode. The options are as follows:

func (FlinksqlJobOutput) RuntimeConfig

func (o FlinksqlJobOutput) RuntimeConfig() pulumi.StringMapOutput

Specifies custom optimization parameters used while the Flink job is running.

func (FlinksqlJobOutput) SmnTopic

func (o FlinksqlJobOutput) SmnTopic() pulumi.StringPtrOutput

Specifies SMN topic. If a job fails, the system will send a message to users subscribed to the SMN topic.

func (FlinksqlJobOutput) Sql

func (o FlinksqlJobOutput) Sql() pulumi.StringPtrOutput

Specifies stream SQL statement, which includes at least the following three parts: source, query, and sink. Length range: 1024x1024 characters.

func (FlinksqlJobOutput) Status

func (o FlinksqlJobOutput) Status() pulumi.StringOutput

The Job status.

func (FlinksqlJobOutput) Tags

func (o FlinksqlJobOutput) Tags() pulumi.StringMapOutput

Specifies the key/value pairs to associate with the resource.

func (FlinksqlJobOutput) TmCus

func (o FlinksqlJobOutput) TmCus() pulumi.IntPtrOutput

Specifies number of CUs for each Task Manager. The default value is 1.

func (FlinksqlJobOutput) TmSlotNum

func (o FlinksqlJobOutput) TmSlotNum() pulumi.IntOutput

Specifies number of slots in each Task Manager. The default value is (**parallel_number** * **tm_cus**)/(**cu_number** - **manager_cu_number**).

func (FlinksqlJobOutput) ToFlinksqlJobOutput

func (o FlinksqlJobOutput) ToFlinksqlJobOutput() FlinksqlJobOutput

func (FlinksqlJobOutput) ToFlinksqlJobOutputWithContext

func (o FlinksqlJobOutput) ToFlinksqlJobOutputWithContext(ctx context.Context) FlinksqlJobOutput

func (FlinksqlJobOutput) Type

func (o FlinksqlJobOutput) Type() pulumi.StringPtrOutput

Specifies the type of the job. The valid values are `flinkSqlJob`, `flinkOpensourceSqlJob` and `flinkSqlEdgeJob`. Default value is `flinkSqlJob`. Changing this parameter will create a new resource.

func (FlinksqlJobOutput) UdfJarUrl

func (o FlinksqlJobOutput) UdfJarUrl() pulumi.StringOutput

Specifies name of the resource package that has been uploaded to the DLI resource management system. The UDF Jar file of the SQL job is specified by this parameter.
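
A hedged sketch of wiring a UDF JAR into a SQL job: the JAR is first registered as a DLI package and then referenced through `udfJarUrl`. The `group_name/object_name` form mirrors the entrypoint example for Flink Jar jobs and is an assumption here, as are the bucket, queue and SQL values.

```go
package main

import (
	"fmt"

	"github.com/huaweicloud/pulumi-huaweicloud/sdk/go/huaweicloud/Dli"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Upload the UDF JAR to the DLI resource management system first.
		udfPackage, err := Dli.NewPackage(ctx, "udfPackage", &Dli.PackageArgs{
			GroupName:  pulumi.String("udfGroup"),
			Type:       pulumi.String("jar"),
			ObjectPath: pulumi.String("obs://my-bucket/udf/my-udf.jar"),
		})
		if err != nil {
			return err
		}
		// Reference the uploaded package from the SQL job (assumed "group/object" form).
		_, err = Dli.NewFlinksqlJob(ctx, "udfJob", &Dli.FlinksqlJobArgs{
			QueueName: pulumi.String("my-queue"),
			Sql:       pulumi.String("-- SQL using the UDF goes here"),
			UdfJarUrl: pulumi.All(udfPackage.GroupName, udfPackage.ObjectName).ApplyT(func(args []interface{}) (string, error) {
				return fmt.Sprintf("%v/%v", args[0], args[1]), nil
			}).(pulumi.StringOutput),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
```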

type FlinksqlJobState

type FlinksqlJobState struct {
	// Specifies whether to enable the automatic job snapshot function.
	// + **true**: indicates to enable the automatic job snapshot function.
	// + **false**: indicates to disable the automatic job snapshot function.
	CheckpointEnabled pulumi.BoolPtrInput
	// Specifies snapshot interval. The unit is second.
	// The default value is 10.
	CheckpointInterval pulumi.IntPtrInput
	// Specifies snapshot mode. There are two options:
	// + **exactly_once**: indicates that data is processed only once.
	// + **at_least_once**: indicates that data is processed at least once.
	CheckpointMode pulumi.StringPtrInput
	// Specifies number of CUs selected for a job. The default value is 2.
	CuNumber pulumi.IntPtrInput
	// Specifies job description. Length range: 1 to 512 characters.
	Description pulumi.StringPtrInput
	// Specifies dirty data policy of a job.
	// + **2:obsDir**: Save the dirty data to the obs path `obsDir`. For example: `2:yourBucket/output_path`
	// + **1**: Trigger a job exception
	// + **0**: Ignore
	DirtyDataStrategy pulumi.StringPtrInput
	// Specifies edge computing group IDs.
	EdgeGroupIds pulumi.StringArrayInput
	// Specifies retention time of the idle state. The unit is hour.
	// The default value is 1.
	IdleStateRetention pulumi.IntPtrInput
	// Specifies whether to enable the function of uploading job logs to
	// users' OBS buckets. The default value is false.
	LogEnabled pulumi.BoolPtrInput
	// Specifies number of CUs in the JobManager selected for a job.
	// The default value is 1.
	ManagerCuNumber pulumi.IntPtrInput
	// Specifies the name of the job, which may consist of letters, digits,
	// underscores (_) and hyphens (-). Length range: 1 to 57 characters.
	Name pulumi.StringPtrInput
	// Specifies the OBS path where users are authorized to save the snapshot
	// (valid only when `checkpointEnabled` is set to `true`) and the job logs (valid only when `logEnabled` is set
	// to `true`).
	ObsBucket pulumi.StringPtrInput
	// Specifies the parallelism of a job. The default value is 1.
	ParallelNumber pulumi.IntPtrInput
	// Specifies name of a queue.
	QueueName pulumi.StringPtrInput
	// The region in which to create the DLI flink job resource. If omitted, the
	// provider-level region will be used. Changing this parameter will create a new resource.
	Region pulumi.StringPtrInput
	// Specifies whether to enable the function of automatically
	// restarting a job upon job exceptions. The default value is false.
	RestartWhenException pulumi.BoolPtrInput
	// Specifies whether the abnormal restart is recovered from the
	// checkpoint.
	ResumeCheckpoint pulumi.BoolPtrInput
	// Specifies maximum number of retry times upon exceptions. The unit is
	// `times/hour`. Value range: `-1` or greater than `0`. The default value is `-1`, indicating that the number of times is
	// unlimited.
	ResumeMaxNum pulumi.IntPtrInput
	// Specifies job running mode. The options are as follows:
	RunMode pulumi.StringPtrInput
	// Specifies customized optimization parameters applied when a Flink job is
	// running.
	RuntimeConfig pulumi.StringMapInput
	// Specifies SMN topic. If a job fails, the system will send a message to
	// users subscribed to the SMN topic.
	SmnTopic pulumi.StringPtrInput
	// Specifies the stream SQL statement, which includes at least the following
	// three parts: source, query, and sink. The maximum length is 1024x1024 characters.
	Sql pulumi.StringPtrInput
	// The Job status.
	Status pulumi.StringPtrInput
	// Specifies the key/value pairs to associate with the resource.
	Tags pulumi.StringMapInput
	// Specifies number of CUs for each Task Manager. The default value is 1.
	TmCus pulumi.IntPtrInput
	// Specifies number of slots in each Task Manager.
	// The default value is (**parallel_number** * **tm_cus**)/(**cu_number** - **manager_cu_number**).
	TmSlotNum pulumi.IntPtrInput
	// Specifies the type of the job. The valid values are `flinkSqlJob`,
	// `flinkOpensourceSqlJob` and `flinkSqlEdgeJob`. Default value is `flinkSqlJob`.
	// Changing this parameter will create a new resource.
	Type pulumi.StringPtrInput
	// Specifies name of the resource package that has been uploaded to the
	// DLI resource management system. The UDF Jar file of the SQL job is specified by this parameter.
	UdfJarUrl pulumi.StringPtrInput
}
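
The checkpoint-related state fields above are normally used together: enabling `checkpointEnabled` only takes effect with an `obsBucket` the job is authorized to write snapshots to. A minimal sketch using the corresponding `FlinksqlJobArgs` fields (the queue name, bucket name, and SQL text below are placeholders, not values from this page):

```go package main

import (

"github.com/huaweicloud/pulumi-huaweicloud/sdk/go/huaweicloud/Dli"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Submit a Flink SQL job that snapshots its state to an OBS bucket.
		// "demo_queue", "demo-bucket" and the SQL text are placeholders.
		_, err := Dli.NewFlinksqlJob(ctx, "example", &Dli.FlinksqlJobArgs{
			Name:               pulumi.String("flink-sql-demo"),
			QueueName:          pulumi.String("demo_queue"),
			Sql:                pulumi.String("CREATE SOURCE STREAM ...; INSERT INTO ..."),
			CheckpointEnabled:  pulumi.Bool(true),
			CheckpointMode:     pulumi.String("exactly_once"),
			CheckpointInterval: pulumi.Int(30),
			ObsBucket:          pulumi.String("demo-bucket"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```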

func (FlinksqlJobState) ElementType

func (FlinksqlJobState) ElementType() reflect.Type

type Package

type Package struct {
	pulumi.CustomResourceState

	// Time when the package is created.
	CreatedAt pulumi.StringOutput `pulumi:"createdAt"`
	// Specifies the group name which the package belongs to.
	// Changing this parameter will delete the current package and upload a new package.
	GroupName pulumi.StringOutput `pulumi:"groupName"`
	// Specifies whether to upload resource packages in asynchronous mode.
	// The default value is **false**. Changing this parameter will delete the current package and upload a new package.
	IsAsync pulumi.BoolOutput `pulumi:"isAsync"`
	// The package name.
	ObjectName pulumi.StringOutput `pulumi:"objectName"`
	// Specifies the OBS storage path where the package is located.
	// For example, `https://{bucket_name}.obs.{region}.myhuaweicloud.com/dli/packages/object_file.py`.
	// Changing this parameter will delete the current package and upload a new package.
	ObjectPath pulumi.StringOutput `pulumi:"objectPath"`
	// Specifies the name of the package owner. The owner must be an IAM user.
	Owner pulumi.StringOutput `pulumi:"owner"`
	// Specifies the region in which to upload packages.
	// If omitted, the provider-level region will be used.
	// Changing this parameter will delete the current package and upload a new package.
	Region pulumi.StringOutput `pulumi:"region"`
	// Status of a package group to be uploaded.
	Status pulumi.StringOutput `pulumi:"status"`
	// Specifies the package type.
	// + **jar**: `.jar` or jar related files.
	// + **pyFile**: `.py` or python related files.
	// + **file**: Other user files.
	Type pulumi.StringOutput `pulumi:"type"`
	// The last time when the package configuration update was completed.
	UpdatedAt pulumi.StringOutput `pulumi:"updatedAt"`
}

Manages a DLI package resource within HuaweiCloud.

## Example Usage ### Upload the specified python script as a resource package

```go package main

import (

"fmt"

"github.com/huaweicloud/pulumi-huaweicloud/sdk/go/huaweicloud/Dli"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		cfg := config.New(ctx, "")
		groupName := cfg.RequireObject("groupName")
		accessDomainName := cfg.RequireObject("accessDomainName")
		_, err := Dli.NewPackage(ctx, "queue", &Dli.PackageArgs{
			GroupName:  pulumi.Any(groupName),
			ObjectPath: pulumi.String(fmt.Sprintf("https://%v/dli/packages/object_file.py", accessDomainName)),
			Type:       pulumi.String("pyFile"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

func GetPackage

func GetPackage(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *PackageState, opts ...pulumi.ResourceOption) (*Package, error)

GetPackage gets an existing Package resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).
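
Like the other Get* lookup helpers on this page, GetPackage is typically used to adopt the state of an already-provisioned package rather than create one. A minimal sketch (the ID string below is a placeholder, not a documented ID format):

```go package main

import (

"github.com/huaweicloud/pulumi-huaweicloud/sdk/go/huaweicloud/Dli"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Look up an existing package by ID; passing a nil state lets the
		// provider refresh all attributes.
		pkg, err := Dli.GetPackage(ctx, "existing", pulumi.ID("placeholder-package-id"), nil)
		if err != nil {
			return err
		}
		// Export one of the read attributes.
		ctx.Export("packageObjectName", pkg.ObjectName)
		return nil
	})
}

```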

func NewPackage

func NewPackage(ctx *pulumi.Context,
	name string, args *PackageArgs, opts ...pulumi.ResourceOption) (*Package, error)

NewPackage registers a new resource with the given unique name, arguments, and options.

func (*Package) ElementType

func (*Package) ElementType() reflect.Type

func (*Package) ToPackageOutput

func (i *Package) ToPackageOutput() PackageOutput

func (*Package) ToPackageOutputWithContext

func (i *Package) ToPackageOutputWithContext(ctx context.Context) PackageOutput

type PackageArgs

type PackageArgs struct {
	// Specifies the group name which the package belongs to.
	// Changing this parameter will delete the current package and upload a new package.
	GroupName pulumi.StringInput
	// Specifies whether to upload resource packages in asynchronous mode.
	// The default value is **false**. Changing this parameter will delete the current package and upload a new package.
	IsAsync pulumi.BoolPtrInput
	// Specifies the OBS storage path where the package is located.
	// For example, `https://{bucket_name}.obs.{region}.myhuaweicloud.com/dli/packages/object_file.py`.
	// Changing this parameter will delete the current package and upload a new package.
	ObjectPath pulumi.StringInput
	// Specifies the name of the package owner. The owner must be an IAM user.
	Owner pulumi.StringPtrInput
	// Specifies the region in which to upload packages.
	// If omitted, the provider-level region will be used.
	// Changing this parameter will delete the current package and upload a new package.
	Region pulumi.StringPtrInput
	// Specifies the package type.
	// + **jar**: `.jar` or jar related files.
	// + **pyFile**: `.py` or python related files.
	// + **file**: Other user files.
	Type pulumi.StringInput
}

The set of arguments for constructing a Package resource.

func (PackageArgs) ElementType

func (PackageArgs) ElementType() reflect.Type

type PackageArray

type PackageArray []PackageInput

func (PackageArray) ElementType

func (PackageArray) ElementType() reflect.Type

func (PackageArray) ToPackageArrayOutput

func (i PackageArray) ToPackageArrayOutput() PackageArrayOutput

func (PackageArray) ToPackageArrayOutputWithContext

func (i PackageArray) ToPackageArrayOutputWithContext(ctx context.Context) PackageArrayOutput

type PackageArrayInput

type PackageArrayInput interface {
	pulumi.Input

	ToPackageArrayOutput() PackageArrayOutput
	ToPackageArrayOutputWithContext(context.Context) PackageArrayOutput
}

PackageArrayInput is an input type that accepts PackageArray and PackageArrayOutput values. You can construct a concrete instance of `PackageArrayInput` via:

PackageArray{ PackageArgs{...} }

type PackageArrayOutput

type PackageArrayOutput struct{ *pulumi.OutputState }

func (PackageArrayOutput) ElementType

func (PackageArrayOutput) ElementType() reflect.Type

func (PackageArrayOutput) Index

func (PackageArrayOutput) ToPackageArrayOutput

func (o PackageArrayOutput) ToPackageArrayOutput() PackageArrayOutput

func (PackageArrayOutput) ToPackageArrayOutputWithContext

func (o PackageArrayOutput) ToPackageArrayOutputWithContext(ctx context.Context) PackageArrayOutput

type PackageInput

type PackageInput interface {
	pulumi.Input

	ToPackageOutput() PackageOutput
	ToPackageOutputWithContext(ctx context.Context) PackageOutput
}

type PackageMap

type PackageMap map[string]PackageInput

func (PackageMap) ElementType

func (PackageMap) ElementType() reflect.Type

func (PackageMap) ToPackageMapOutput

func (i PackageMap) ToPackageMapOutput() PackageMapOutput

func (PackageMap) ToPackageMapOutputWithContext

func (i PackageMap) ToPackageMapOutputWithContext(ctx context.Context) PackageMapOutput

type PackageMapInput

type PackageMapInput interface {
	pulumi.Input

	ToPackageMapOutput() PackageMapOutput
	ToPackageMapOutputWithContext(context.Context) PackageMapOutput
}

PackageMapInput is an input type that accepts PackageMap and PackageMapOutput values. You can construct a concrete instance of `PackageMapInput` via:

PackageMap{ "key": PackageArgs{...} }

type PackageMapOutput

type PackageMapOutput struct{ *pulumi.OutputState }

func (PackageMapOutput) ElementType

func (PackageMapOutput) ElementType() reflect.Type

func (PackageMapOutput) MapIndex

func (PackageMapOutput) ToPackageMapOutput

func (o PackageMapOutput) ToPackageMapOutput() PackageMapOutput

func (PackageMapOutput) ToPackageMapOutputWithContext

func (o PackageMapOutput) ToPackageMapOutputWithContext(ctx context.Context) PackageMapOutput

type PackageOutput

type PackageOutput struct{ *pulumi.OutputState }

func (PackageOutput) CreatedAt

func (o PackageOutput) CreatedAt() pulumi.StringOutput

Time when the package is created.

func (PackageOutput) ElementType

func (PackageOutput) ElementType() reflect.Type

func (PackageOutput) GroupName

func (o PackageOutput) GroupName() pulumi.StringOutput

Specifies the group name which the package belongs to. Changing this parameter will delete the current package and upload a new package.

func (PackageOutput) IsAsync

func (o PackageOutput) IsAsync() pulumi.BoolOutput

Specifies whether to upload resource packages in asynchronous mode. The default value is **false**. Changing this parameter will delete the current package and upload a new package.

func (PackageOutput) ObjectName

func (o PackageOutput) ObjectName() pulumi.StringOutput

The package name.

func (PackageOutput) ObjectPath

func (o PackageOutput) ObjectPath() pulumi.StringOutput

Specifies the OBS storage path where the package is located. For example, `https://{bucket_name}.obs.{region}.myhuaweicloud.com/dli/packages/object_file.py`. Changing this parameter will delete the current package and upload a new package.

func (PackageOutput) Owner

func (o PackageOutput) Owner() pulumi.StringOutput

Specifies the name of the package owner. The owner must be an IAM user.

func (PackageOutput) Region

func (o PackageOutput) Region() pulumi.StringOutput

Specifies the region in which to upload packages. If omitted, the provider-level region will be used. Changing this parameter will delete the current package and upload a new package.

func (PackageOutput) Status

func (o PackageOutput) Status() pulumi.StringOutput

Status of a package group to be uploaded.

func (PackageOutput) ToPackageOutput

func (o PackageOutput) ToPackageOutput() PackageOutput

func (PackageOutput) ToPackageOutputWithContext

func (o PackageOutput) ToPackageOutputWithContext(ctx context.Context) PackageOutput

func (PackageOutput) Type

func (o PackageOutput) Type() pulumi.StringOutput

Specifies the package type. + **jar**: `.jar` or jar related files. + **pyFile**: `.py` or python related files. + **file**: Other user files.

func (PackageOutput) UpdatedAt

func (o PackageOutput) UpdatedAt() pulumi.StringOutput

The last time when the package configuration update was completed.

type PackageState

type PackageState struct {
	// Time when the package is created.
	CreatedAt pulumi.StringPtrInput
	// Specifies the group name which the package belongs to.
	// Changing this parameter will delete the current package and upload a new package.
	GroupName pulumi.StringPtrInput
	// Specifies whether to upload resource packages in asynchronous mode.
	// The default value is **false**. Changing this parameter will delete the current package and upload a new package.
	IsAsync pulumi.BoolPtrInput
	// The package name.
	ObjectName pulumi.StringPtrInput
	// Specifies the OBS storage path where the package is located.
	// For example, `https://{bucket_name}.obs.{region}.myhuaweicloud.com/dli/packages/object_file.py`.
	// Changing this parameter will delete the current package and upload a new package.
	ObjectPath pulumi.StringPtrInput
	// Specifies the name of the package owner. The owner must be an IAM user.
	Owner pulumi.StringPtrInput
	// Specifies the region in which to upload packages.
	// If omitted, the provider-level region will be used.
	// Changing this parameter will delete the current package and upload a new package.
	Region pulumi.StringPtrInput
	// Status of a package group to be uploaded.
	Status pulumi.StringPtrInput
	// Specifies the package type.
	// + **jar**: `.jar` or jar related files.
	// + **pyFile**: `.py` or python related files.
	// + **file**: Other user files.
	Type pulumi.StringPtrInput
	// The last time when the package configuration update was completed.
	UpdatedAt pulumi.StringPtrInput
}

func (PackageState) ElementType

func (PackageState) ElementType() reflect.Type

type Permission

type Permission struct {
	pulumi.CustomResourceState

	// Whether this user is an administrator.
	IsAdmin pulumi.BoolOutput `pulumi:"isAdmin"`
	// Specifies which object's data usage permissions will be shared.
	// Its naming format is as follows:
	// + **queues.`queuesName`**: the usage permissions of queue.
	// + **databases.`databaseName`**: the usage permissions of data in the database.
	// + **databases.`databaseName`.tables.`tableName`**: the usage permissions of data in the table.
	// + **databases.`databaseName`.tables.`tableName`.columns.`columnName`**: the usage permissions of data in the column.
	// + **jobs.flink.`flinkJobId`**: the usage permissions of data in the flink job.
	// + **groups.`packageGroupName`**: the usage permissions of data in the package group.
	// + **resources.`packageName`**: the usage permissions of data in the package.
	Object pulumi.StringOutput `pulumi:"object"`
	// Specifies the usage permissions of data.
	// + **Permissions on Queue, Database and Table**,
	//   please see [Permissions Management](https://support.huaweicloud.com/intl/en-us/productdesc-dli/dli_07_0006.html)
	Privileges pulumi.StringArrayOutput `pulumi:"privileges"`
	// The region in which to create the DLI permission resource. If omitted, the
	// provider-level region will be used. Changing this parameter will create a new resource.
	Region pulumi.StringOutput `pulumi:"region"`
	// Specifies name of the user who is granted with usage permission.
	// Changing this parameter will create a new resource.
	UserName pulumi.StringOutput `pulumi:"userName"`
}

Manages the usage permissions of the following resources within HuaweiCloud DLI: `Dli.Queue`, `Dli.Database`, `Dli.Table`, `Dli.Package`, `Dli.FlinksqlJob` and `Dli.FlinkjarJob`.

## Example Usage ### Grant a permission of queue

```go package main

import (

"fmt"

"github.com/huaweicloud/pulumi-huaweicloud/sdk/go/huaweicloud/Dli"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		cfg := config.New(ctx, "")
		userName := cfg.RequireObject("userName")
		queueName := cfg.RequireObject("queueName")
		_, err := Dli.NewPermission(ctx, "test", &Dli.PermissionArgs{
			UserName: pulumi.Any(userName),
			Object:   pulumi.String(fmt.Sprintf("queues.%v", queueName)),
			Privileges: pulumi.StringArray{
				pulumi.String("SUBMIT_JOB"),
				pulumi.String("DROP_QUEUE"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}

``` ### Grant a permission of database

```go package main

import (

"fmt"

"github.com/huaweicloud/pulumi-huaweicloud/sdk/go/huaweicloud/Dli"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		cfg := config.New(ctx, "")
		userName := cfg.RequireObject("userName")
		databaseName := cfg.RequireObject("databaseName")
		_, err := Dli.NewPermission(ctx, "test", &Dli.PermissionArgs{
			UserName: pulumi.Any(userName),
			Object:   pulumi.String(fmt.Sprintf("databases.%v", databaseName)),
			Privileges: pulumi.StringArray{
				pulumi.String("SELECT"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## Import

The permission can be imported by `id`, it is composed of `object` and `user_name`, separated by a slash. e.g.

```sh

$ pulumi import huaweicloud:Dli/permission:Permission test databases.database_name/user_name

```
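
The `object` argument also accepts the table-level format described above. A sketch of that variant, following the same configuration pattern as the queue and database examples (the `tableName` config key and the `SELECT` privilege are illustrative assumptions):

```go package main

import (

"fmt"

"github.com/huaweicloud/pulumi-huaweicloud/sdk/go/huaweicloud/Dli"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		cfg := config.New(ctx, "")
		userName := cfg.RequireObject("userName")
		databaseName := cfg.RequireObject("databaseName")
		tableName := cfg.RequireObject("tableName")
		// Grant table-level usage permission using the
		// databases.<databaseName>.tables.<tableName> object format.
		_, err := Dli.NewPermission(ctx, "table", &Dli.PermissionArgs{
			UserName: pulumi.Any(userName),
			Object:   pulumi.String(fmt.Sprintf("databases.%v.tables.%v", databaseName, tableName)),
			Privileges: pulumi.StringArray{
				pulumi.String("SELECT"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```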

func GetPermission

func GetPermission(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *PermissionState, opts ...pulumi.ResourceOption) (*Permission, error)

GetPermission gets an existing Permission resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewPermission

func NewPermission(ctx *pulumi.Context,
	name string, args *PermissionArgs, opts ...pulumi.ResourceOption) (*Permission, error)

NewPermission registers a new resource with the given unique name, arguments, and options.

func (*Permission) ElementType

func (*Permission) ElementType() reflect.Type

func (*Permission) ToPermissionOutput

func (i *Permission) ToPermissionOutput() PermissionOutput

func (*Permission) ToPermissionOutputWithContext

func (i *Permission) ToPermissionOutputWithContext(ctx context.Context) PermissionOutput

type PermissionArgs

type PermissionArgs struct {
	// Specifies which object's data usage permissions will be shared.
	// Its naming format is as follows:
	// + **queues.`queuesName`**: the usage permissions of queue.
	// + **databases.`databaseName`**: the usage permissions of data in the database.
	// + **databases.`databaseName`.tables.`tableName`**: the usage permissions of data in the table.
	// + **databases.`databaseName`.tables.`tableName`.columns.`columnName`**: the usage permissions of data in the column.
	// + **jobs.flink.`flinkJobId`**: the usage permissions of data in the flink job.
	// + **groups.`packageGroupName`**: the usage permissions of data in the package group.
	// + **resources.`packageName`**: the usage permissions of data in the package.
	Object pulumi.StringInput
	// Specifies the usage permissions of data.
	// + **Permissions on Queue, Database and Table**,
	//   please see [Permissions Management](https://support.huaweicloud.com/intl/en-us/productdesc-dli/dli_07_0006.html)
	Privileges pulumi.StringArrayInput
	// The region in which to create the DLI permission resource. If omitted, the
	// provider-level region will be used. Changing this parameter will create a new resource.
	Region pulumi.StringPtrInput
	// Specifies name of the user who is granted with usage permission.
	// Changing this parameter will create a new resource.
	UserName pulumi.StringInput
}

The set of arguments for constructing a Permission resource.

func (PermissionArgs) ElementType

func (PermissionArgs) ElementType() reflect.Type

type PermissionArray

type PermissionArray []PermissionInput

func (PermissionArray) ElementType

func (PermissionArray) ElementType() reflect.Type

func (PermissionArray) ToPermissionArrayOutput

func (i PermissionArray) ToPermissionArrayOutput() PermissionArrayOutput

func (PermissionArray) ToPermissionArrayOutputWithContext

func (i PermissionArray) ToPermissionArrayOutputWithContext(ctx context.Context) PermissionArrayOutput

type PermissionArrayInput

type PermissionArrayInput interface {
	pulumi.Input

	ToPermissionArrayOutput() PermissionArrayOutput
	ToPermissionArrayOutputWithContext(context.Context) PermissionArrayOutput
}

PermissionArrayInput is an input type that accepts PermissionArray and PermissionArrayOutput values. You can construct a concrete instance of `PermissionArrayInput` via:

PermissionArray{ PermissionArgs{...} }

type PermissionArrayOutput

type PermissionArrayOutput struct{ *pulumi.OutputState }

func (PermissionArrayOutput) ElementType

func (PermissionArrayOutput) ElementType() reflect.Type

func (PermissionArrayOutput) Index

func (PermissionArrayOutput) ToPermissionArrayOutput

func (o PermissionArrayOutput) ToPermissionArrayOutput() PermissionArrayOutput

func (PermissionArrayOutput) ToPermissionArrayOutputWithContext

func (o PermissionArrayOutput) ToPermissionArrayOutputWithContext(ctx context.Context) PermissionArrayOutput

type PermissionInput

type PermissionInput interface {
	pulumi.Input

	ToPermissionOutput() PermissionOutput
	ToPermissionOutputWithContext(ctx context.Context) PermissionOutput
}

type PermissionMap

type PermissionMap map[string]PermissionInput

func (PermissionMap) ElementType

func (PermissionMap) ElementType() reflect.Type

func (PermissionMap) ToPermissionMapOutput

func (i PermissionMap) ToPermissionMapOutput() PermissionMapOutput

func (PermissionMap) ToPermissionMapOutputWithContext

func (i PermissionMap) ToPermissionMapOutputWithContext(ctx context.Context) PermissionMapOutput

type PermissionMapInput

type PermissionMapInput interface {
	pulumi.Input

	ToPermissionMapOutput() PermissionMapOutput
	ToPermissionMapOutputWithContext(context.Context) PermissionMapOutput
}

PermissionMapInput is an input type that accepts PermissionMap and PermissionMapOutput values. You can construct a concrete instance of `PermissionMapInput` via:

PermissionMap{ "key": PermissionArgs{...} }

type PermissionMapOutput

type PermissionMapOutput struct{ *pulumi.OutputState }

func (PermissionMapOutput) ElementType

func (PermissionMapOutput) ElementType() reflect.Type

func (PermissionMapOutput) MapIndex

func (PermissionMapOutput) ToPermissionMapOutput

func (o PermissionMapOutput) ToPermissionMapOutput() PermissionMapOutput

func (PermissionMapOutput) ToPermissionMapOutputWithContext

func (o PermissionMapOutput) ToPermissionMapOutputWithContext(ctx context.Context) PermissionMapOutput

type PermissionOutput

type PermissionOutput struct{ *pulumi.OutputState }

func (PermissionOutput) ElementType

func (PermissionOutput) ElementType() reflect.Type

func (PermissionOutput) IsAdmin

func (o PermissionOutput) IsAdmin() pulumi.BoolOutput

Whether this user is an administrator.

func (PermissionOutput) Object

func (o PermissionOutput) Object() pulumi.StringOutput

Specifies which object's data usage permissions will be shared. Its naming format is as follows: + **queues.`queuesName`**: the usage permissions of queue. + **databases.`databaseName`**: the usage permissions of data in the database. + **databases.`databaseName`.tables.`tableName`**: the usage permissions of data in the table. + **databases.`databaseName`.tables.`tableName`.columns.`columnName`**: the usage permissions of data in the column. + **jobs.flink.`flinkJobId`**: the usage permissions of data in the flink job. + **groups.`packageGroupName`**: the usage permissions of data in the package group. + **resources.`packageName`**: the usage permissions of data in the package.

func (PermissionOutput) Privileges

func (o PermissionOutput) Privileges() pulumi.StringArrayOutput

Specifies the usage permissions of data.

func (PermissionOutput) Region

func (o PermissionOutput) Region() pulumi.StringOutput

The region in which to create the DLI permission resource. If omitted, the provider-level region will be used. Changing this parameter will create a new resource.

func (PermissionOutput) ToPermissionOutput

func (o PermissionOutput) ToPermissionOutput() PermissionOutput

func (PermissionOutput) ToPermissionOutputWithContext

func (o PermissionOutput) ToPermissionOutputWithContext(ctx context.Context) PermissionOutput

func (PermissionOutput) UserName

func (o PermissionOutput) UserName() pulumi.StringOutput

Specifies name of the user who is granted with usage permission. Changing this parameter will create a new resource.

type PermissionState

type PermissionState struct {
	// Whether this user is an administrator.
	IsAdmin pulumi.BoolPtrInput
	// Specifies which object's data usage permissions will be shared.
	// Its naming format is as follows:
	// + **queues.`queuesName`**: the usage permissions of queue.
	// + **databases.`databaseName`**: the usage permissions of data in the database.
	// + **databases.`databaseName`.tables.`tableName`**: the usage permissions of data in the table.
	// + **databases.`databaseName`.tables.`tableName`.columns.`columnName`**: the usage permissions of data in the column.
	// + **jobs.flink.`flinkJobId`**: the usage permissions of data in the flink job.
	// + **groups.`packageGroupName`**: the usage permissions of data in the package group.
	// + **resources.`packageName`**: the usage permissions of data in the package.
	Object pulumi.StringPtrInput
	// Specifies the usage permissions of data.
	// + **Permissions on Queue, Database and Table**,
	//   please see [Permissions Management](https://support.huaweicloud.com/intl/en-us/productdesc-dli/dli_07_0006.html)
	Privileges pulumi.StringArrayInput
	// The region in which to create the DLI permission resource. If omitted, the
	// provider-level region will be used. Changing this parameter will create a new resource.
	Region pulumi.StringPtrInput
	// Specifies name of the user who is granted with usage permission.
	// Changing this parameter will create a new resource.
	UserName pulumi.StringPtrInput
}

func (PermissionState) ElementType

func (PermissionState) ElementType() reflect.Type

type Queue

type Queue struct {
	pulumi.CustomResourceState

	// Time when a queue is created.
	CreateTime pulumi.IntOutput `pulumi:"createTime"`
	// Minimum number of CUs that are bound to a queue. The initial value can be `16`,
	// `64`, or `256`. When scaling out or in, the number must be a multiple of 16.
	CuCount pulumi.IntOutput `pulumi:"cuCount"`
	// Description of a queue. Changing this parameter will create a new
	// resource.
	Description pulumi.StringOutput `pulumi:"description"`
	// Enterprise project ID. The value 0 indicates the default
	// enterprise project. Changing this parameter will create a new resource.
	EnterpriseProjectId pulumi.StringOutput `pulumi:"enterpriseProjectId"`
	// Indicates the queue feature. Changing this parameter will create a new
	// resource. The options are as follows:
	// + basic: basic type (default value)
	// + ai: AI-enhanced (Only the SQL x8664 dedicated queue supports this option.)
	Feature pulumi.StringPtrOutput `pulumi:"feature"`
	// Deprecated: management_subnet_cidr is Deprecated
	ManagementSubnetCidr pulumi.StringPtrOutput `pulumi:"managementSubnetCidr"`
	// Name of a queue. Name of a newly created resource queue. The name can contain
	// only digits, letters, and underscores (\_), but cannot contain only digits or start with an underscore (_). Length
	// range: 1 to 128 characters. Changing this parameter will create a new resource.
	Name pulumi.StringOutput `pulumi:"name"`
	// CPU architecture of queue compute resources. Changing this parameter will
	// create a new resource. The options are as follows:
	// + x8664 : default value
	// + aarch64
	Platform pulumi.StringPtrOutput `pulumi:"platform"`
	// Indicates the queue type. Changing this parameter will create a new
	// resource. The options are as follows:
	// + sql
	// + general
	QueueType pulumi.StringPtrOutput `pulumi:"queueType"`
	// Specifies the region in which to create the DLI queue resource. If omitted,
	// the provider-level region will be used. Changing this parameter will create a new resource.
	Region pulumi.StringOutput `pulumi:"region"`
	// Queue resource mode. Changing this parameter will create a new
	// resource. The options are as follows:
	// + 0: indicates the shared resource mode.
	// + 1: indicates the exclusive resource mode.
	ResourceMode pulumi.IntPtrOutput `pulumi:"resourceMode"`
	// Deprecated: subnet_cidr is Deprecated
	SubnetCidr pulumi.StringPtrOutput `pulumi:"subnetCidr"`
	// Label of a queue. Changing this parameter will create a new resource.
	Tags pulumi.StringMapOutput `pulumi:"tags"`
	// The CIDR block of a queue. If you use DLI enhanced datasource connections, the CIDR block
	// cannot be the same as that of the data source.
	// The supported CIDR blocks vary with the CU specification.
	VpcCidr pulumi.StringOutput `pulumi:"vpcCidr"`
}

Manages a DLI Queue resource within HuaweiCloud.

## Example Usage ### Create a queue

```go package main

import (

"github.com/huaweicloud/pulumi-huaweicloud/sdk/go/huaweicloud/Dli"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := Dli.NewQueue(ctx, "queue", &Dli.QueueArgs{
			CuCount: pulumi.Int(16),
			Tags: pulumi.StringMap{
				"foo": pulumi.String("bar"),
				"key": pulumi.String("value"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}

``` ### Create a queue with CIDR Block

```go package main

import (

"github.com/huaweicloud/pulumi-huaweicloud/sdk/go/huaweicloud/Dli"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := Dli.NewQueue(ctx, "queue", &Dli.QueueArgs{
			CuCount:      pulumi.Int(16),
			ResourceMode: pulumi.Int(1),
			Tags: pulumi.StringMap{
				"foo": pulumi.String("bar"),
				"key": pulumi.String("value"),
			},
			VpcCidr: pulumi.String("172.16.0.0/14"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## Import

DLI queue can be imported by `id`. For example,

```sh

$ pulumi import huaweicloud:Dli/queue:Queue example abc123

```
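
Combining the arguments above, a general-purpose queue on a specific CPU architecture can be declared as follows; a minimal sketch in which the queue name is a placeholder:

```go package main

import (

"github.com/huaweicloud/pulumi-huaweicloud/sdk/go/huaweicloud/Dli"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// An exclusive (resource mode 1), general-purpose queue running on
		// aarch64 compute resources. "general_queue" is a placeholder name.
		_, err := Dli.NewQueue(ctx, "general", &Dli.QueueArgs{
			Name:         pulumi.String("general_queue"),
			CuCount:      pulumi.Int(16),
			QueueType:    pulumi.String("general"),
			Platform:     pulumi.String("aarch64"),
			ResourceMode: pulumi.Int(1),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```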

func GetQueue

func GetQueue(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *QueueState, opts ...pulumi.ResourceOption) (*Queue, error)

GetQueue gets an existing Queue resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewQueue

func NewQueue(ctx *pulumi.Context,
	name string, args *QueueArgs, opts ...pulumi.ResourceOption) (*Queue, error)

NewQueue registers a new resource with the given unique name, arguments, and options.

func (*Queue) ElementType

func (*Queue) ElementType() reflect.Type

func (*Queue) ToQueueOutput

func (i *Queue) ToQueueOutput() QueueOutput

func (*Queue) ToQueueOutputWithContext

func (i *Queue) ToQueueOutputWithContext(ctx context.Context) QueueOutput

type QueueArgs

type QueueArgs struct {
	// Minimum number of CUs that are bound to a queue. The initial value can be `16`,
	// `64`, or `256`. When scaling out or in, the number must be a multiple of 16.
	CuCount pulumi.IntInput
	// Description of a queue. Changing this parameter will create a new
	// resource.
	Description pulumi.StringPtrInput
	// Enterprise project ID. The value 0 indicates the default
	// enterprise project. Changing this parameter will create a new resource.
	EnterpriseProjectId pulumi.StringPtrInput
	// Indicates the queue feature. Changing this parameter will create a new
	// resource. The options are as follows:
	// + basic: basic type (default value)
	// + ai: AI-enhanced (Only the SQL x8664 dedicated queue supports this option.)
	Feature pulumi.StringPtrInput
	// Deprecated: management_subnet_cidr is Deprecated
	ManagementSubnetCidr pulumi.StringPtrInput
	// Name of a queue. Name of a newly created resource queue. The name can contain
	// only digits, letters, and underscores (\_), but cannot contain only digits or start with an underscore (_). Length
	// range: 1 to 128 characters. Changing this parameter will create a new resource.
	Name pulumi.StringPtrInput
	// CPU architecture of queue compute resources. Changing this parameter will
	// create a new resource. The options are as follows:
	// + x8664 : default value
	// + aarch64
	Platform pulumi.StringPtrInput
	// Indicates the queue type. Changing this parameter will create a new
	// resource. The options are as follows:
	// + sql
	// + general
	QueueType pulumi.StringPtrInput
	// Specifies the region in which to create the DLI queue resource. If omitted,
	// the provider-level region will be used. Changing this parameter will create a new resource.
	Region pulumi.StringPtrInput
	// Queue resource mode. Changing this parameter will create a new
	// resource. The options are as follows:
	// + 0: indicates the shared resource mode.
	// + 1: indicates the exclusive resource mode.
	ResourceMode pulumi.IntPtrInput
	// Deprecated: subnet_cidr is Deprecated
	SubnetCidr pulumi.StringPtrInput
	// Label of a queue. Changing this parameter will create a new resource.
	Tags pulumi.StringMapInput
	// The CIDR block of a queue. If you use DLI enhanced datasource connections, the CIDR block
	// cannot be the same as that of the data source.
	// The supported CIDR blocks vary with the CU specification.
	VpcCidr pulumi.StringPtrInput
}

The set of arguments for constructing a Queue resource.

func (QueueArgs) ElementType

func (QueueArgs) ElementType() reflect.Type

type QueueArray

type QueueArray []QueueInput

func (QueueArray) ElementType

func (QueueArray) ElementType() reflect.Type

func (QueueArray) ToQueueArrayOutput

func (i QueueArray) ToQueueArrayOutput() QueueArrayOutput

func (QueueArray) ToQueueArrayOutputWithContext

func (i QueueArray) ToQueueArrayOutputWithContext(ctx context.Context) QueueArrayOutput

type QueueArrayInput

type QueueArrayInput interface {
	pulumi.Input

	ToQueueArrayOutput() QueueArrayOutput
	ToQueueArrayOutputWithContext(context.Context) QueueArrayOutput
}

QueueArrayInput is an input type that accepts QueueArray and QueueArrayOutput values. You can construct a concrete instance of `QueueArrayInput` via:

QueueArray{ QueueArgs{...} }

type QueueArrayOutput

type QueueArrayOutput struct{ *pulumi.OutputState }

func (QueueArrayOutput) ElementType

func (QueueArrayOutput) ElementType() reflect.Type

func (QueueArrayOutput) Index

func (QueueArrayOutput) ToQueueArrayOutput

func (o QueueArrayOutput) ToQueueArrayOutput() QueueArrayOutput

func (QueueArrayOutput) ToQueueArrayOutputWithContext

func (o QueueArrayOutput) ToQueueArrayOutputWithContext(ctx context.Context) QueueArrayOutput

type QueueInput

type QueueInput interface {
	pulumi.Input

	ToQueueOutput() QueueOutput
	ToQueueOutputWithContext(ctx context.Context) QueueOutput
}

type QueueMap

type QueueMap map[string]QueueInput

func (QueueMap) ElementType

func (QueueMap) ElementType() reflect.Type

func (QueueMap) ToQueueMapOutput

func (i QueueMap) ToQueueMapOutput() QueueMapOutput

func (QueueMap) ToQueueMapOutputWithContext

func (i QueueMap) ToQueueMapOutputWithContext(ctx context.Context) QueueMapOutput

type QueueMapInput

type QueueMapInput interface {
	pulumi.Input

	ToQueueMapOutput() QueueMapOutput
	ToQueueMapOutputWithContext(context.Context) QueueMapOutput
}

QueueMapInput is an input type that accepts QueueMap and QueueMapOutput values. You can construct a concrete instance of `QueueMapInput` via:

QueueMap{ "key": QueueArgs{...} }

type QueueMapOutput

type QueueMapOutput struct{ *pulumi.OutputState }

func (QueueMapOutput) ElementType

func (QueueMapOutput) ElementType() reflect.Type

func (QueueMapOutput) MapIndex

func (QueueMapOutput) ToQueueMapOutput

func (o QueueMapOutput) ToQueueMapOutput() QueueMapOutput

func (QueueMapOutput) ToQueueMapOutputWithContext

func (o QueueMapOutput) ToQueueMapOutputWithContext(ctx context.Context) QueueMapOutput

type QueueOutput

type QueueOutput struct{ *pulumi.OutputState }

func (QueueOutput) CreateTime

func (o QueueOutput) CreateTime() pulumi.IntOutput

Time when a queue is created.

func (QueueOutput) CuCount

func (o QueueOutput) CuCount() pulumi.IntOutput

Minimum number of CUs that are bound to a queue. The initial value can be `16`, `64`, or `256`. When scaling out or in, the number must be a multiple of 16.

func (QueueOutput) Description

func (o QueueOutput) Description() pulumi.StringOutput

Description of a queue. Changing this parameter will create a new resource.

func (QueueOutput) ElementType

func (QueueOutput) ElementType() reflect.Type

func (QueueOutput) EnterpriseProjectId

func (o QueueOutput) EnterpriseProjectId() pulumi.StringOutput

Enterprise project ID. The value 0 indicates the default enterprise project. Changing this parameter will create a new resource.

func (QueueOutput) Feature

func (o QueueOutput) Feature() pulumi.StringPtrOutput

Indicates the queue feature. Changing this parameter will create a new resource. The options are as follows: + basic: basic type (default value) + ai: AI-enhanced (Only the SQL x8664 dedicated queue supports this option.)

func (QueueOutput) ManagementSubnetCidr deprecated

func (o QueueOutput) ManagementSubnetCidr() pulumi.StringPtrOutput

Deprecated: management_subnet_cidr is Deprecated

func (QueueOutput) Name

func (o QueueOutput) Name() pulumi.StringOutput

Name of a queue. Name of a newly created resource queue. The name can contain only digits, letters, and underscores (\_), but cannot contain only digits or start with an underscore (_). Length range: 1 to 128 characters. Changing this parameter will create a new resource.

func (QueueOutput) Platform

func (o QueueOutput) Platform() pulumi.StringPtrOutput

CPU architecture of queue compute resources. Changing this parameter will create a new resource. The options are as follows: + x8664 : default value + aarch64

func (QueueOutput) QueueType

func (o QueueOutput) QueueType() pulumi.StringPtrOutput

Indicates the queue type. Changing this parameter will create a new resource. The options are as follows: + sql + general

func (QueueOutput) Region

func (o QueueOutput) Region() pulumi.StringOutput

Specifies the region in which to create the DLI queue resource. If omitted, the provider-level region will be used. Changing this parameter will create a new resource.

func (QueueOutput) ResourceMode

func (o QueueOutput) ResourceMode() pulumi.IntPtrOutput

Queue resource mode. Changing this parameter will create a new resource. The options are as follows: + 0: indicates the shared resource mode. + 1: indicates the exclusive resource mode.

func (QueueOutput) SubnetCidr deprecated

func (o QueueOutput) SubnetCidr() pulumi.StringPtrOutput

Deprecated: subnet_cidr is Deprecated

func (QueueOutput) Tags

func (o QueueOutput) Tags() pulumi.StringMapOutput

Label of a queue. Changing this parameter will create a new resource.

func (QueueOutput) ToQueueOutput

func (o QueueOutput) ToQueueOutput() QueueOutput

func (QueueOutput) ToQueueOutputWithContext

func (o QueueOutput) ToQueueOutputWithContext(ctx context.Context) QueueOutput

func (QueueOutput) VpcCidr

func (o QueueOutput) VpcCidr() pulumi.StringOutput

The CIDR block of a queue. If you use DLI enhanced datasource connections, the CIDR block cannot be the same as that of the data source. The supported CIDR blocks vary with the CU specification.

type QueueState

type QueueState struct {
	// Time when a queue is created.
	CreateTime pulumi.IntPtrInput
	// Minimum number of CUs that are bound to a queue. The initial value can be `16`,
	// `64`, or `256`. When scaling out or in, the number must be a multiple of 16.
	CuCount pulumi.IntPtrInput
	// Description of a queue. Changing this parameter will create a new
	// resource.
	Description pulumi.StringPtrInput
	// Enterprise project ID. The value 0 indicates the default
	// enterprise project. Changing this parameter will create a new resource.
	EnterpriseProjectId pulumi.StringPtrInput
	// Indicates the queue feature. Changing this parameter will create a new
	// resource. The options are as follows:
	// + basic: basic type (default value)
	// + ai: AI-enhanced (Only the SQL x8664 dedicated queue supports this option.)
	Feature pulumi.StringPtrInput
	// Deprecated: management_subnet_cidr is Deprecated
	ManagementSubnetCidr pulumi.StringPtrInput
	// Name of a queue. Name of a newly created resource queue. The name can contain
	// only digits, letters, and underscores (\_), but cannot contain only digits or start with an underscore (_). Length
	// range: 1 to 128 characters. Changing this parameter will create a new resource.
	Name pulumi.StringPtrInput
	// CPU architecture of queue compute resources. Changing this parameter will
	// create a new resource. The options are as follows:
	// + x8664 : default value
	// + aarch64
	Platform pulumi.StringPtrInput
	// Indicates the queue type. Changing this parameter will create a new
	// resource. The options are as follows:
	// + sql
	// + general
	QueueType pulumi.StringPtrInput
	// Specifies the region in which to create the DLI queue resource. If omitted,
	// the provider-level region will be used. Changing this parameter will create a new resource.
	Region pulumi.StringPtrInput
	// Queue resource mode. Changing this parameter will create a new
	// resource. The options are as follows:
	// + 0: indicates the shared resource mode.
	// + 1: indicates the exclusive resource mode.
	ResourceMode pulumi.IntPtrInput
	// Deprecated: subnet_cidr is Deprecated
	SubnetCidr pulumi.StringPtrInput
	// Label of a queue. Changing this parameter will create a new resource.
	Tags pulumi.StringMapInput
	// The CIDR block of a queue. If you use DLI enhanced datasource connections, the CIDR block
	// cannot be the same as that of the data source.
	// The supported CIDR blocks vary with the CU specification.
	VpcCidr pulumi.StringPtrInput
}

func (QueueState) ElementType

func (QueueState) ElementType() reflect.Type

type SparkJob

type SparkJob struct {
	pulumi.CustomResourceState

	// Specifies the name of the package that is of the JAR or python file type and
	// has been uploaded to the DLI resource management system.
	// The OBS paths are allowed, for example, `obs://<bucket name>/<package name>`.
	// Changing this parameter will submit a new spark job.
	AppName pulumi.StringOutput `pulumi:"appName"`
	// Specifies the input parameters of the main class.
	// Changing this parameter will submit a new spark job.
	AppParameters pulumi.StringPtrOutput `pulumi:"appParameters"`
	// Specifies the configuration items of the DLI spark.
	// Please following the document of Spark [configurations](https://spark.apache.org/docs/latest/configuration.html) for
	// this argument. If you want to enable the `access metadata` of DLI spark in HuaweiCloud, please set
	// `spark.dli.metaAccess.enable` to `true`. Changing this parameter will submit a new spark job.
	Configurations pulumi.StringMapOutput `pulumi:"configurations"`
	// Time when the DLI spark job was submitted.
	CreatedAt pulumi.StringOutput `pulumi:"createdAt"`
	// Specifies a list of package resource objects.
	// The object structure is documented below.
	// Changing this parameter will submit a new spark job.
	DependentPackages SparkJobDependentPackageArrayOutput `pulumi:"dependentPackages"`
	// Specifies the number of CPU cores of the Spark application driver.
	// The default value of this value corresponds to the configuration of the selected `specification`.
	// If you set this value instead of the default value, `specification` will be invalid.
	// Changing this parameter will submit a new spark job.
	DriverCores pulumi.IntPtrOutput `pulumi:"driverCores"`
	// Specifies the driver memory of the spark application.
	// The default value of this value corresponds to the configuration of the selected `specification`.
	// If you set this value instead of the default value, `specification` will be invalid.
	// Changing this parameter will submit a new spark job.
	DriverMemory pulumi.StringPtrOutput `pulumi:"driverMemory"`
	// Specifies the number of CPU cores of each executor in the Spark
	// application. The default value of this value corresponds to the configuration of the selected `specification`.
	// If you set this value instead of the default value, `specification` will be invalid.
	// Changing this parameter will submit a new spark job.
	ExecutorCores pulumi.IntPtrOutput `pulumi:"executorCores"`
	// Specifies the executor memory of the spark application.
	// The default value of this value corresponds to the configuration of the selected `specification`.
	// If you set this value instead of the default value, `specification` will be invalid.
	// Changing this parameter will submit a new spark job.
	ExecutorMemory pulumi.StringPtrOutput `pulumi:"executorMemory"`
	// Specifies the number of executors in a spark application.
	// The default value of this value corresponds to the configuration of the selected `specification`.
	// If you set this value instead of the default value, `specification` will be invalid.
	// Changing this parameter will submit a new spark job.
	Executors pulumi.IntPtrOutput `pulumi:"executors"`
	// Specifies a list of the other dependencies name which has been uploaded to the
	// DLI resource management system. The OBS paths are allowed, for example, `obs://<bucket name>/<dependent files>`.
	// Changing this parameter will submit a new spark job.
	Files pulumi.StringArrayOutput `pulumi:"files"`
	// Specifies a list of the jar package name which has been uploaded to the DLI
	// resource management system. The OBS paths are allowed, for example, `obs://<bucket name>/<package name>`.
	// Changing this parameter will submit a new spark job.
	Jars pulumi.StringArrayOutput `pulumi:"jars"`
	// Specifies the main class of the spark job.
	// Required if the `appName` is the JAR type.
	// Changing this parameter will submit a new spark job.
	MainClass pulumi.StringPtrOutput `pulumi:"mainClass"`
	// Specifies the maximum retry times.
	// The default value of this value corresponds to the configuration of the selected `specification`.
	// If you set this value instead of the default value, `specification` will be invalid.
	// Changing this parameter will submit a new spark job.
	MaxRetries pulumi.IntPtrOutput `pulumi:"maxRetries"`
	// Specifies a list of modules that depend on system resources.
	// The dependent modules and corresponding services are as follows.
	// Changing this parameter will submit a new spark job.
	// + **sys.datasource.hbase**: CloudTable/MRS HBase
	// + **sys.datasource.opentsdb**: CloudTable/MRS OpenTSDB
	// + **sys.datasource.rds**: RDS MySQL
	// + **sys.datasource.css**: CSS
	Modules pulumi.StringArrayOutput `pulumi:"modules"`
	// Specifies the spark job name.
	// The value contains a maximum of 128 characters.
	// Changing this parameter will submit a new spark job.
	Name pulumi.StringOutput `pulumi:"name"`
	// The owner of the spark job.
	Owner pulumi.StringOutput `pulumi:"owner"`
	// Specifies a list of the python file name which has been uploaded to the
	// DLI resource management system. The OBS paths are allowed, for example, `obs://<bucket name>/<python file name>`.
	// Changing this parameter will submit a new spark job.
	PythonFiles pulumi.StringArrayOutput `pulumi:"pythonFiles"`
	// Specifies the DLI queue name.
	// Changing this parameter will submit a new spark job.
	QueueName pulumi.StringOutput `pulumi:"queueName"`
	// Specifies the region in which to submit a spark job.
	// If omitted, the provider-level region will be used.
	// Changing this parameter will submit a new spark job.
	Region pulumi.StringOutput `pulumi:"region"`
	// Specifies the compute resource type for spark application.
	// The available types and related specifications are as follows, default to minimum configuration (type **A**).
	// Changing this parameter will submit a new spark job.
	Specification pulumi.StringPtrOutput `pulumi:"specification"`
}

Manages a Spark job resource of DLI within HuaweiCloud.

## Example Usage ### Submit a new spark job with jar packages

```go package main

import (

"github.com/huaweicloud/pulumi-huaweicloud/sdk/go/huaweicloud/Dli"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := Dli.NewSparkJob(ctx, "default", &Dli.SparkJobArgs{
			QueueName:     pulumi.Any(_var.Queue_name),
			AppName:       pulumi.String("driver_package/driver_behavior.jar"),
			MainClass:     pulumi.String("driver_behavior"),
			Specification: pulumi.String("B"),
			MaxRetries:    pulumi.Int(20),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```
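
The same resource can also submit Python workloads. A sketch assuming a Python script has already been uploaded as a `pyFile` package (the package, queue, and job names, and the module list, are illustrative placeholders):

```go package main

import (

"github.com/huaweicloud/pulumi-huaweicloud/sdk/go/huaweicloud/Dli"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Submit a PySpark job that enables DLI metadata access and depends on
		// the RDS datasource module. All names here are placeholders.
		_, err := Dli.NewSparkJob(ctx, "python", &Dli.SparkJobArgs{
			QueueName: pulumi.String("demo_queue"),
			Name:      pulumi.String("word-count"),
			AppName:   pulumi.String("driver_package/word_count.py"),
			Configurations: pulumi.StringMap{
				"spark.dli.metaAccess.enable": pulumi.String("true"),
			},
			Modules: pulumi.StringArray{
				pulumi.String("sys.datasource.rds"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```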

func GetSparkJob

func GetSparkJob(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *SparkJobState, opts ...pulumi.ResourceOption) (*SparkJob, error)

GetSparkJob gets an existing SparkJob resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewSparkJob

func NewSparkJob(ctx *pulumi.Context,
	name string, args *SparkJobArgs, opts ...pulumi.ResourceOption) (*SparkJob, error)

NewSparkJob registers a new resource with the given unique name, arguments, and options.

func (*SparkJob) ElementType

func (*SparkJob) ElementType() reflect.Type

func (*SparkJob) ToSparkJobOutput

func (i *SparkJob) ToSparkJobOutput() SparkJobOutput

func (*SparkJob) ToSparkJobOutputWithContext

func (i *SparkJob) ToSparkJobOutputWithContext(ctx context.Context) SparkJobOutput

type SparkJobArgs

type SparkJobArgs struct {
	// Specifies the name of the package that is of the JAR or python file type and
	// has been uploaded to the DLI resource management system.
	// The OBS paths are allowed, for example, `obs://<bucket name>/<package name>`.
	// Changing this parameter will submit a new spark job.
	AppName pulumi.StringInput
	// Specifies the input parameters of the main class.
	// Changing this parameter will submit a new spark job.
	AppParameters pulumi.StringPtrInput
	// Specifies the configuration items of the DLI spark.
	// Please following the document of Spark [configurations](https://spark.apache.org/docs/latest/configuration.html) for
	// this argument. If you want to enable the `access metadata` of DLI spark in HuaweiCloud, please set
	// `spark.dli.metaAccess.enable` to `true`. Changing this parameter will submit a new spark job.
	Configurations pulumi.StringMapInput
	// Specifies a list of package resource objects.
	// The object structure is documented below.
	// Changing this parameter will submit a new spark job.
	DependentPackages SparkJobDependentPackageArrayInput
	// Specifies the number of CPU cores of the Spark application driver.
	// The default value of this value corresponds to the configuration of the selected `specification`.
	// If you set this value instead of the default value, `specification` will be invalid.
	// Changing this parameter will submit a new spark job.
	DriverCores pulumi.IntPtrInput
	// Specifies the driver memory of the spark application.
	// The default value of this value corresponds to the configuration of the selected `specification`.
	// If you set this value instead of the default value, `specification` will be invalid.
	// Changing this parameter will submit a new spark job.
	DriverMemory pulumi.StringPtrInput
	// Specifies the number of CPU cores of each executor in the Spark
	// application. The default value of this value corresponds to the configuration of the selected `specification`.
	// If you set this value instead of the default value, `specification` will be invalid.
	// Changing this parameter will submit a new spark job.
	ExecutorCores pulumi.IntPtrInput
	// Specifies the executor memory of the spark application.
	// The default value of this value corresponds to the configuration of the selected `specification`.
	// If you set this value instead of the default value, `specification` will be invalid.
	// Changing this parameter will submit a new spark job.
	ExecutorMemory pulumi.StringPtrInput
	// Specifies the number of executors in a spark application.
	// The default value of this value corresponds to the configuration of the selected `specification`.
	// If you set this value instead of the default value, `specification` will be invalid.
	// Changing this parameter will submit a new spark job.
	Executors pulumi.IntPtrInput
	// Specifies a list of the other dependencies name which has been uploaded to the
	// DLI resource management system. The OBS paths are allowed, for example, `obs://<bucket name>/<dependent files>`.
	// Changing this parameter will submit a new spark job.
	Files pulumi.StringArrayInput
	// Specifies a list of the jar package name which has been uploaded to the DLI
	// resource management system. The OBS paths are allowed, for example, `obs://<bucket name>/<package name>`.
	// Changing this parameter will submit a new spark job.
	Jars pulumi.StringArrayInput
	// Specifies the main class of the spark job.
	// Required if the `appName` is the JAR type.
	// Changing this parameter will submit a new spark job.
	MainClass pulumi.StringPtrInput
	// Specifies the maximum retry times.
	// The default value of this value corresponds to the configuration of the selected `specification`.
	// If you set this value instead of the default value, `specification` will be invalid.
	// Changing this parameter will submit a new spark job.
	MaxRetries pulumi.IntPtrInput
	// Specifies a list of modules that depend on system resources.
	// The dependent modules and corresponding services are as follows.
	// Changing this parameter will submit a new spark job.
	// + **sys.datasource.hbase**: CloudTable/MRS HBase
	// + **sys.datasource.opentsdb**: CloudTable/MRS OpenTSDB
	// + **sys.datasource.rds**: RDS MySQL
	// + **sys.datasource.css**: CSS
	Modules pulumi.StringArrayInput
	// Specifies the spark job name.
	// The value contains a maximum of 128 characters.
	// Changing this parameter will submit a new spark job.
	Name pulumi.StringPtrInput
	// Specifies a list of the python file name which has been uploaded to the
	// DLI resource management system. The OBS paths are allowed, for example, `obs://<bucket name>/<python file name>`.
	// Changing this parameter will submit a new spark job.
	PythonFiles pulumi.StringArrayInput
	// Specifies the DLI queue name.
	// Changing this parameter will submit a new spark job.
	QueueName pulumi.StringInput
	// Specifies the region in which to submit a spark job.
	// If omitted, the provider-level region will be used.
	// Changing this parameter will submit a new spark job.
	Region pulumi.StringPtrInput
	// Specifies the compute resource type for spark application.
	// The available types and related specifications are as follows, default to minimum configuration (type **A**).
	// Changing this parameter will submit a new spark job.
	Specification pulumi.StringPtrInput
}

The set of arguments for constructing a SparkJob resource.

func (SparkJobArgs) ElementType

func (SparkJobArgs) ElementType() reflect.Type

type SparkJobArray

type SparkJobArray []SparkJobInput

func (SparkJobArray) ElementType

func (SparkJobArray) ElementType() reflect.Type

func (SparkJobArray) ToSparkJobArrayOutput

func (i SparkJobArray) ToSparkJobArrayOutput() SparkJobArrayOutput

func (SparkJobArray) ToSparkJobArrayOutputWithContext

func (i SparkJobArray) ToSparkJobArrayOutputWithContext(ctx context.Context) SparkJobArrayOutput

type SparkJobArrayInput

type SparkJobArrayInput interface {
	pulumi.Input

	ToSparkJobArrayOutput() SparkJobArrayOutput
	ToSparkJobArrayOutputWithContext(context.Context) SparkJobArrayOutput
}

SparkJobArrayInput is an input type that accepts SparkJobArray and SparkJobArrayOutput values. You can construct a concrete instance of `SparkJobArrayInput` via:

SparkJobArray{ SparkJobArgs{...} }

type SparkJobArrayOutput

type SparkJobArrayOutput struct{ *pulumi.OutputState }

func (SparkJobArrayOutput) ElementType

func (SparkJobArrayOutput) ElementType() reflect.Type

func (SparkJobArrayOutput) Index

func (SparkJobArrayOutput) ToSparkJobArrayOutput

func (o SparkJobArrayOutput) ToSparkJobArrayOutput() SparkJobArrayOutput

func (SparkJobArrayOutput) ToSparkJobArrayOutputWithContext

func (o SparkJobArrayOutput) ToSparkJobArrayOutputWithContext(ctx context.Context) SparkJobArrayOutput

type SparkJobDependentPackage

type SparkJobDependentPackage struct {
	// Specifies the user group name.
	// Changing this parameter will submit a new spark job.
	GroupName string `pulumi:"groupName"`
	// Specifies the package resources of the user group.
	// Changing this parameter will submit a new spark job.
	// The object structure is documented below.
	Packages []SparkJobDependentPackagePackage `pulumi:"packages"`
}

type SparkJobDependentPackageArgs

type SparkJobDependentPackageArgs struct {
	// Specifies the user group name.
	// Changing this parameter will submit a new spark job.
	GroupName pulumi.StringInput `pulumi:"groupName"`
	// Specifies the package resources of the user group.
	// Changing this parameter will submit a new spark job.
	// The object structure is documented below.
	Packages SparkJobDependentPackagePackageArrayInput `pulumi:"packages"`
}
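
As an illustrative sketch, a dependent package group could be declared like this inside a program that already imports the `Dli` and `pulumi` packages shown in the examples above; the group name, package name and resource type value are hypothetical:

```go
// A hypothetical resource group with a single JAR dependency.
dependentPackages := Dli.SparkJobDependentPackageArray{
	&Dli.SparkJobDependentPackageArgs{
		GroupName: pulumi.String("my-resource-group"),
		Packages: Dli.SparkJobDependentPackagePackageArray{
			&Dli.SparkJobDependentPackagePackageArgs{
				PackageName: pulumi.String("dependency.jar"),
				Type:        pulumi.String("jar"), // assumed resource type value
			},
		},
	},
}
```

The resulting value can then be passed as the `DependentPackages` argument of `SparkJobArgs`.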

func (SparkJobDependentPackageArgs) ElementType

func (SparkJobDependentPackageArgs) ToSparkJobDependentPackageOutput

func (i SparkJobDependentPackageArgs) ToSparkJobDependentPackageOutput() SparkJobDependentPackageOutput

func (SparkJobDependentPackageArgs) ToSparkJobDependentPackageOutputWithContext

func (i SparkJobDependentPackageArgs) ToSparkJobDependentPackageOutputWithContext(ctx context.Context) SparkJobDependentPackageOutput

type SparkJobDependentPackageArray

type SparkJobDependentPackageArray []SparkJobDependentPackageInput

func (SparkJobDependentPackageArray) ElementType

func (SparkJobDependentPackageArray) ToSparkJobDependentPackageArrayOutput

func (i SparkJobDependentPackageArray) ToSparkJobDependentPackageArrayOutput() SparkJobDependentPackageArrayOutput

func (SparkJobDependentPackageArray) ToSparkJobDependentPackageArrayOutputWithContext

func (i SparkJobDependentPackageArray) ToSparkJobDependentPackageArrayOutputWithContext(ctx context.Context) SparkJobDependentPackageArrayOutput

type SparkJobDependentPackageArrayInput

type SparkJobDependentPackageArrayInput interface {
	pulumi.Input

	ToSparkJobDependentPackageArrayOutput() SparkJobDependentPackageArrayOutput
	ToSparkJobDependentPackageArrayOutputWithContext(context.Context) SparkJobDependentPackageArrayOutput
}

SparkJobDependentPackageArrayInput is an input type that accepts SparkJobDependentPackageArray and SparkJobDependentPackageArrayOutput values. You can construct a concrete instance of `SparkJobDependentPackageArrayInput` via:

SparkJobDependentPackageArray{ SparkJobDependentPackageArgs{...} }

type SparkJobDependentPackageArrayOutput

type SparkJobDependentPackageArrayOutput struct{ *pulumi.OutputState }

func (SparkJobDependentPackageArrayOutput) ElementType

func (SparkJobDependentPackageArrayOutput) Index

func (SparkJobDependentPackageArrayOutput) ToSparkJobDependentPackageArrayOutput

func (o SparkJobDependentPackageArrayOutput) ToSparkJobDependentPackageArrayOutput() SparkJobDependentPackageArrayOutput

func (SparkJobDependentPackageArrayOutput) ToSparkJobDependentPackageArrayOutputWithContext

func (o SparkJobDependentPackageArrayOutput) ToSparkJobDependentPackageArrayOutputWithContext(ctx context.Context) SparkJobDependentPackageArrayOutput

type SparkJobDependentPackageInput

type SparkJobDependentPackageInput interface {
	pulumi.Input

	ToSparkJobDependentPackageOutput() SparkJobDependentPackageOutput
	ToSparkJobDependentPackageOutputWithContext(context.Context) SparkJobDependentPackageOutput
}

SparkJobDependentPackageInput is an input type that accepts SparkJobDependentPackageArgs and SparkJobDependentPackageOutput values. You can construct a concrete instance of `SparkJobDependentPackageInput` via:

SparkJobDependentPackageArgs{...}

type SparkJobDependentPackageOutput

type SparkJobDependentPackageOutput struct{ *pulumi.OutputState }

func (SparkJobDependentPackageOutput) ElementType

func (SparkJobDependentPackageOutput) GroupName

Specifies the user group name. Changing this parameter will submit a new spark job.

func (SparkJobDependentPackageOutput) Packages

Specifies the package resources of the user group. Changing this parameter will submit a new spark job. The object structure is documented below.

func (SparkJobDependentPackageOutput) ToSparkJobDependentPackageOutput

func (o SparkJobDependentPackageOutput) ToSparkJobDependentPackageOutput() SparkJobDependentPackageOutput

func (SparkJobDependentPackageOutput) ToSparkJobDependentPackageOutputWithContext

func (o SparkJobDependentPackageOutput) ToSparkJobDependentPackageOutputWithContext(ctx context.Context) SparkJobDependentPackageOutput

type SparkJobDependentPackagePackage

type SparkJobDependentPackagePackage struct {
	// Specifies the resource name of the package.
	// Changing this parameter will submit a new spark job.
	PackageName string `pulumi:"packageName"`
	// Specifies the resource type of the package.
	// Changing this parameter will submit a new spark job.
	Type string `pulumi:"type"`
}

type SparkJobDependentPackagePackageArgs

type SparkJobDependentPackagePackageArgs struct {
	// Specifies the resource name of the package.
	// Changing this parameter will submit a new spark job.
	PackageName pulumi.StringInput `pulumi:"packageName"`
	// Specifies the resource type of the package.
	// Changing this parameter will submit a new spark job.
	Type pulumi.StringInput `pulumi:"type"`
}

func (SparkJobDependentPackagePackageArgs) ElementType

func (SparkJobDependentPackagePackageArgs) ToSparkJobDependentPackagePackageOutput

func (i SparkJobDependentPackagePackageArgs) ToSparkJobDependentPackagePackageOutput() SparkJobDependentPackagePackageOutput

func (SparkJobDependentPackagePackageArgs) ToSparkJobDependentPackagePackageOutputWithContext

func (i SparkJobDependentPackagePackageArgs) ToSparkJobDependentPackagePackageOutputWithContext(ctx context.Context) SparkJobDependentPackagePackageOutput

type SparkJobDependentPackagePackageArray

type SparkJobDependentPackagePackageArray []SparkJobDependentPackagePackageInput

func (SparkJobDependentPackagePackageArray) ElementType

func (SparkJobDependentPackagePackageArray) ToSparkJobDependentPackagePackageArrayOutput

func (i SparkJobDependentPackagePackageArray) ToSparkJobDependentPackagePackageArrayOutput() SparkJobDependentPackagePackageArrayOutput

func (SparkJobDependentPackagePackageArray) ToSparkJobDependentPackagePackageArrayOutputWithContext

func (i SparkJobDependentPackagePackageArray) ToSparkJobDependentPackagePackageArrayOutputWithContext(ctx context.Context) SparkJobDependentPackagePackageArrayOutput

type SparkJobDependentPackagePackageArrayInput

type SparkJobDependentPackagePackageArrayInput interface {
	pulumi.Input

	ToSparkJobDependentPackagePackageArrayOutput() SparkJobDependentPackagePackageArrayOutput
	ToSparkJobDependentPackagePackageArrayOutputWithContext(context.Context) SparkJobDependentPackagePackageArrayOutput
}

SparkJobDependentPackagePackageArrayInput is an input type that accepts SparkJobDependentPackagePackageArray and SparkJobDependentPackagePackageArrayOutput values. You can construct a concrete instance of `SparkJobDependentPackagePackageArrayInput` via:

SparkJobDependentPackagePackageArray{ SparkJobDependentPackagePackageArgs{...} }

type SparkJobDependentPackagePackageArrayOutput

type SparkJobDependentPackagePackageArrayOutput struct{ *pulumi.OutputState }

func (SparkJobDependentPackagePackageArrayOutput) ElementType

func (SparkJobDependentPackagePackageArrayOutput) Index

func (SparkJobDependentPackagePackageArrayOutput) ToSparkJobDependentPackagePackageArrayOutput

func (o SparkJobDependentPackagePackageArrayOutput) ToSparkJobDependentPackagePackageArrayOutput() SparkJobDependentPackagePackageArrayOutput

func (SparkJobDependentPackagePackageArrayOutput) ToSparkJobDependentPackagePackageArrayOutputWithContext

func (o SparkJobDependentPackagePackageArrayOutput) ToSparkJobDependentPackagePackageArrayOutputWithContext(ctx context.Context) SparkJobDependentPackagePackageArrayOutput

type SparkJobDependentPackagePackageInput

type SparkJobDependentPackagePackageInput interface {
	pulumi.Input

	ToSparkJobDependentPackagePackageOutput() SparkJobDependentPackagePackageOutput
	ToSparkJobDependentPackagePackageOutputWithContext(context.Context) SparkJobDependentPackagePackageOutput
}

SparkJobDependentPackagePackageInput is an input type that accepts SparkJobDependentPackagePackageArgs and SparkJobDependentPackagePackageOutput values. You can construct a concrete instance of `SparkJobDependentPackagePackageInput` via:

SparkJobDependentPackagePackageArgs{...}

type SparkJobDependentPackagePackageOutput

type SparkJobDependentPackagePackageOutput struct{ *pulumi.OutputState }

func (SparkJobDependentPackagePackageOutput) ElementType

func (SparkJobDependentPackagePackageOutput) PackageName

Specifies the resource name of the package. Changing this parameter will submit a new spark job.

func (SparkJobDependentPackagePackageOutput) ToSparkJobDependentPackagePackageOutput

func (o SparkJobDependentPackagePackageOutput) ToSparkJobDependentPackagePackageOutput() SparkJobDependentPackagePackageOutput

func (SparkJobDependentPackagePackageOutput) ToSparkJobDependentPackagePackageOutputWithContext

func (o SparkJobDependentPackagePackageOutput) ToSparkJobDependentPackagePackageOutputWithContext(ctx context.Context) SparkJobDependentPackagePackageOutput

func (SparkJobDependentPackagePackageOutput) Type

Specifies the resource type of the package. Changing this parameter will submit a new spark job.

type SparkJobInput

type SparkJobInput interface {
	pulumi.Input

	ToSparkJobOutput() SparkJobOutput
	ToSparkJobOutputWithContext(ctx context.Context) SparkJobOutput
}

type SparkJobMap

type SparkJobMap map[string]SparkJobInput

func (SparkJobMap) ElementType

func (SparkJobMap) ElementType() reflect.Type

func (SparkJobMap) ToSparkJobMapOutput

func (i SparkJobMap) ToSparkJobMapOutput() SparkJobMapOutput

func (SparkJobMap) ToSparkJobMapOutputWithContext

func (i SparkJobMap) ToSparkJobMapOutputWithContext(ctx context.Context) SparkJobMapOutput

type SparkJobMapInput

type SparkJobMapInput interface {
	pulumi.Input

	ToSparkJobMapOutput() SparkJobMapOutput
	ToSparkJobMapOutputWithContext(context.Context) SparkJobMapOutput
}

SparkJobMapInput is an input type that accepts SparkJobMap and SparkJobMapOutput values. You can construct a concrete instance of `SparkJobMapInput` via:

SparkJobMap{ "key": SparkJobArgs{...} }

type SparkJobMapOutput

type SparkJobMapOutput struct{ *pulumi.OutputState }

func (SparkJobMapOutput) ElementType

func (SparkJobMapOutput) ElementType() reflect.Type

func (SparkJobMapOutput) MapIndex

func (SparkJobMapOutput) ToSparkJobMapOutput

func (o SparkJobMapOutput) ToSparkJobMapOutput() SparkJobMapOutput

func (SparkJobMapOutput) ToSparkJobMapOutputWithContext

func (o SparkJobMapOutput) ToSparkJobMapOutputWithContext(ctx context.Context) SparkJobMapOutput

type SparkJobOutput

type SparkJobOutput struct{ *pulumi.OutputState }

func (SparkJobOutput) AppName

func (o SparkJobOutput) AppName() pulumi.StringOutput

Specifies the name of the package that is of the JAR or python file type and has been uploaded to the DLI resource management system. The OBS paths are allowed, for example, `obs://<bucket name>/<package name>`. Changing this parameter will submit a new spark job.

func (SparkJobOutput) AppParameters

func (o SparkJobOutput) AppParameters() pulumi.StringPtrOutput

Specifies the input parameters of the main class. Changing this parameter will submit a new spark job.

func (SparkJobOutput) Configurations

func (o SparkJobOutput) Configurations() pulumi.StringMapOutput

Specifies the configuration items of the DLI spark. Please following the document of Spark [configurations](https://spark.apache.org/docs/latest/configuration.html) for this argument. If you want to enable the `access metadata` of DLI spark in HuaweiCloud, please set `spark.dli.metaAccess.enable` to `true`. Changing this parameter will submit a new spark job.

func (SparkJobOutput) CreatedAt

func (o SparkJobOutput) CreatedAt() pulumi.StringOutput

Time of the DLI spark job submit.

func (SparkJobOutput) DependentPackages

Specifies a list of package resource objects. The object structure is documented below. Changing this parameter will submit a new spark job.

func (SparkJobOutput) DriverCores

func (o SparkJobOutput) DriverCores() pulumi.IntPtrOutput

Specifies the number of CPU cores of the Spark application driver. The default value of this value corresponds to the configuration of the selected `specification`. If you set this value instead of the default value, `specification` will be invalid. Changing this parameter will submit a new spark job.

func (SparkJobOutput) DriverMemory

func (o SparkJobOutput) DriverMemory() pulumi.StringPtrOutput

Specifies the driver memory of the spark application. The default value of this value corresponds to the configuration of the selected `specification`. If you set this value instead of the default value, `specification` will be invalid. Changing this parameter will submit a new spark job.

func (SparkJobOutput) ElementType

func (SparkJobOutput) ElementType() reflect.Type

func (SparkJobOutput) ExecutorCores

func (o SparkJobOutput) ExecutorCores() pulumi.IntPtrOutput

Specifies the number of CPU cores of each executor in the Spark application. The default value of this value corresponds to the configuration of the selected `specification`. If you set this value instead of the default value, `specification` will be invalid. Changing this parameter will submit a new spark job.

func (SparkJobOutput) ExecutorMemory

func (o SparkJobOutput) ExecutorMemory() pulumi.StringPtrOutput

Specifies the executor memory of the spark application. The default value of this value corresponds to the configuration of the selected `specification`. If you set this value instead of the default value, `specification` will be invalid. Changing this parameter will submit a new spark job.

func (SparkJobOutput) Executors

func (o SparkJobOutput) Executors() pulumi.IntPtrOutput

Specifies the number of executors in a spark application. The default value of this value corresponds to the configuration of the selected `specification`. If you set this value instead of the default value, `specification` will be invalid. Changing this parameter will submit a new spark job.

func (SparkJobOutput) Files

Specifies a list of the other dependencies name which has been uploaded to the DLI resource management system. The OBS paths are allowed, for example, `obs://<bucket name>/<dependent files>`. Changing this parameter will submit a new spark job.

func (SparkJobOutput) Jars

Specifies a list of the jar package name which has been uploaded to the DLI resource management system. The OBS paths are allowed, for example, `obs://<bucket name>/<package name>`. Changing this parameter will submit a new spark job.

func (SparkJobOutput) MainClass

func (o SparkJobOutput) MainClass() pulumi.StringPtrOutput

Specifies the main class of the spark job. Required if the `appName` is the JAR type. Changing this parameter will submit a new spark job.

func (SparkJobOutput) MaxRetries

func (o SparkJobOutput) MaxRetries() pulumi.IntPtrOutput

Specifies the maximum retry times. The default value of this value corresponds to the configuration of the selected `specification`. If you set this value instead of the default value, `specification` will be invalid. Changing this parameter will submit a new spark job.

func (SparkJobOutput) Modules

Specifies a list of modules that depend on system resources. The dependent modules and corresponding services are as follows. Changing this parameter will submit a new spark job. + **sys.datasource.hbase**: CloudTable/MRS HBase + **sys.datasource.opentsdb**: CloudTable/MRS OpenTSDB + **sys.datasource.rds**: RDS MySQL + **sys.datasource.css**: CSS

func (SparkJobOutput) Name

Specifies the spark job name. The value contains a maximum of 128 characters. Changing this parameter will submit a new spark job.

func (SparkJobOutput) Owner

The owner of the spark job.

func (SparkJobOutput) PythonFiles

func (o SparkJobOutput) PythonFiles() pulumi.StringArrayOutput

Specifies a list of the python file name which has been uploaded to the DLI resource management system. The OBS paths are allowed, for example, `obs://<bucket name>/<python file name>`. Changing this parameter will submit a new spark job.

func (SparkJobOutput) QueueName

func (o SparkJobOutput) QueueName() pulumi.StringOutput

Specifies the DLI queue name. Changing this parameter will submit a new spark job.

func (SparkJobOutput) Region

func (o SparkJobOutput) Region() pulumi.StringOutput

Specifies the region in which to submit a spark job. If omitted, the provider-level region will be used. Changing this parameter will submit a new spark job.

func (SparkJobOutput) Specification

func (o SparkJobOutput) Specification() pulumi.StringPtrOutput

Specifies the compute resource type for spark application. The available types and related specifications are as follows, default to minimum configuration (type **A**). Changing this parameter will submit a new spark job.

func (SparkJobOutput) ToSparkJobOutput

func (o SparkJobOutput) ToSparkJobOutput() SparkJobOutput

func (SparkJobOutput) ToSparkJobOutputWithContext

func (o SparkJobOutput) ToSparkJobOutputWithContext(ctx context.Context) SparkJobOutput

type SparkJobState

type SparkJobState struct {
	// Specifies the name of the package that is of the JAR or python file type and
	// has been uploaded to the DLI resource management system.
	// The OBS paths are allowed, for example, `obs://<bucket name>/<package name>`.
	// Changing this parameter will submit a new spark job.
	AppName pulumi.StringPtrInput
	// Specifies the input parameters of the main class.
	// Changing this parameter will submit a new spark job.
	AppParameters pulumi.StringPtrInput
	// Specifies the configuration items of the DLI spark.
	// Please following the document of Spark [configurations](https://spark.apache.org/docs/latest/configuration.html) for
	// this argument. If you want to enable the `access metadata` of DLI spark in HuaweiCloud, please set
	// `spark.dli.metaAccess.enable` to `true`. Changing this parameter will submit a new spark job.
	Configurations pulumi.StringMapInput
	// Time of the DLI spark job submit.
	CreatedAt pulumi.StringPtrInput
	// Specifies a list of package resource objects.
	// The object structure is documented below.
	// Changing this parameter will submit a new spark job.
	DependentPackages SparkJobDependentPackageArrayInput
	// Specifies the number of CPU cores of the Spark application driver.
	// The default value of this value corresponds to the configuration of the selected `specification`.
	// If you set this value instead of the default value, `specification` will be invalid.
	// Changing this parameter will submit a new spark job.
	DriverCores pulumi.IntPtrInput
	// Specifies the driver memory of the spark application.
	// The default value of this value corresponds to the configuration of the selected `specification`.
	// If you set this value instead of the default value, `specification` will be invalid.
	// Changing this parameter will submit a new spark job.
	DriverMemory pulumi.StringPtrInput
	// Specifies the number of CPU cores of each executor in the Spark
	// application. The default value of this value corresponds to the configuration of the selected `specification`.
	// If you set this value instead of the default value, `specification` will be invalid.
	// Changing this parameter will submit a new spark job.
	ExecutorCores pulumi.IntPtrInput
	// Specifies the executor memory of the spark application.
	// The default value of this value corresponds to the configuration of the selected `specification`.
	// If you set this value instead of the default value, `specification` will be invalid.
	// Changing this parameter will submit a new spark job.
	ExecutorMemory pulumi.StringPtrInput
	// Specifies the number of executors in a spark application.
	// The default value of this value corresponds to the configuration of the selected `specification`.
	// If you set this value instead of the default value, `specification` will be invalid.
	// Changing this parameter will submit a new spark job.
	Executors pulumi.IntPtrInput
	// Specifies a list of the other dependencies name which has been uploaded to the
	// DLI resource management system. The OBS paths are allowed, for example, `obs://<bucket name>/<dependent files>`.
	// Changing this parameter will submit a new spark job.
	Files pulumi.StringArrayInput
	// Specifies a list of the jar package name which has been uploaded to the DLI
	// resource management system. The OBS paths are allowed, for example, `obs://<bucket name>/<package name>`.
	// Changing this parameter will submit a new spark job.
	Jars pulumi.StringArrayInput
	// Specifies the main class of the spark job.
	// Required if the `appName` is the JAR type.
	// Changing this parameter will submit a new spark job.
	MainClass pulumi.StringPtrInput
	// Specifies the maximum retry times.
	// The default value of this value corresponds to the configuration of the selected `specification`.
	// If you set this value instead of the default value, `specification` will be invalid.
	// Changing this parameter will submit a new spark job.
	MaxRetries pulumi.IntPtrInput
	// Specifies a list of modules that depend on system resources.
	// The dependent modules and corresponding services are as follows.
	// Changing this parameter will submit a new spark job.
	// + **sys.datasource.hbase**: CloudTable/MRS HBase
	// + **sys.datasource.opentsdb**: CloudTable/MRS OpenTSDB
	// + **sys.datasource.rds**: RDS MySQL
	// + **sys.datasource.css**: CSS
	Modules pulumi.StringArrayInput
	// Specifies the spark job name.
	// The value contains a maximum of 128 characters.
	// Changing this parameter will submit a new spark job.
	Name pulumi.StringPtrInput
	// The owner of the spark job.
	Owner pulumi.StringPtrInput
	// Specifies a list of the python file name which has been uploaded to the
	// DLI resource management system. The OBS paths are allowed, for example, `obs://<bucket name>/<python file name>`.
	// Changing this parameter will submit a new spark job.
	PythonFiles pulumi.StringArrayInput
	// Specifies the DLI queue name.
	// Changing this parameter will submit a new spark job.
	QueueName pulumi.StringPtrInput
	// Specifies the region in which to submit a spark job.
	// If omitted, the provider-level region will be used.
	// Changing this parameter will submit a new spark job.
	Region pulumi.StringPtrInput
	// Specifies the compute resource type for spark application.
	// The available types and related specifications are as follows, default to minimum configuration (type **A**).
	// Changing this parameter will submit a new spark job.
	Specification pulumi.StringPtrInput
}

func (SparkJobState) ElementType

func (SparkJobState) ElementType() reflect.Type

type SqlJob

type SqlJob struct {
	pulumi.CustomResourceState

	// Specifies the configuration parameters for the SQL job. Changing this parameter
	// will create a new resource. Structure is documented below.
	Conf SqlJobConfPtrOutput `pulumi:"conf"`
	// Specifies the database where the SQL is executed. This argument does
	// not need to be configured during database creation. Changing this parameter will create a new resource.
	DatabaseName pulumi.StringPtrOutput `pulumi:"databaseName"`
	// Job running duration (unit: millisecond).
	Duration pulumi.IntOutput `pulumi:"duration"`
	// Type of a job. Includes **DDL**, **DCL**, **IMPORT**, **EXPORT**, **QUERY**, **INSERT**,
	// **DATA_MIGRATION**, **UPDATE**, **DELETE**, **RESTART_QUEUE** and **SCALE_QUEUE**.
	JobType pulumi.StringOutput `pulumi:"jobType"`
	// User who submits a job.
	Owner pulumi.StringOutput `pulumi:"owner"`
	// Specifies the queue to which the job is to be submitted.
	// Changing this parameter will create a new resource.
	QueueName pulumi.StringOutput `pulumi:"queueName"`
	// Specifies the region in which to create the DLI table resource. If omitted,
	// the provider-level region will be used. Changing this parameter will create a new resource.
	Region pulumi.StringOutput `pulumi:"region"`
	// When the statement type is DDL, results of the DDL are displayed.
	Rows pulumi.StringArrayArrayOutput `pulumi:"rows"`
	// When the statement type is DDL, the column name and type of DDL are displayed.
	Schemas pulumi.StringMapArrayOutput `pulumi:"schemas"`
	// Specifies SQL statement that you want to execute.
	// Changing this parameter will create a new resource.
	Sql pulumi.StringOutput `pulumi:"sql"`
	// Time when a job is started, in RFC-3339 format. e.g. `2019-10-12T07:20:50.52Z`
	StartTime pulumi.StringOutput `pulumi:"startTime"`
	// Status of a job, including **RUNNING**, **SCALING**, **LAUNCHING**, **FINISHED**, **FAILED**,
	// and **CANCELLED.**
	Status pulumi.StringOutput `pulumi:"status"`
	// Specifies label of a Job. Changing this parameter will create a new resource.
	Tags pulumi.StringMapOutput `pulumi:"tags"`
}

Manages DLI SQL job resource within HuaweiCloud.

## Example Usage ### Create a Sql job

```go package main

import (

"github.com/huaweicloud/pulumi-huaweicloud/sdk/go/huaweicloud/Dli"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		cfg := config.New(ctx, "")
		databaseName := cfg.Require("databaseName")
		queueName := cfg.Require("queueName")
		sql := cfg.Require("sql")
		_, err := Dli.NewSqlJob(ctx, "test", &Dli.SqlJobArgs{
			Sql:          pulumi.String(sql),
			DatabaseName: pulumi.String(databaseName),
			QueueName:    pulumi.String(queueName),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## Import

DLI SQL job can be imported by `id`. For example,

```sh

$ pulumi import huaweicloud:Dli/sqlJob:SqlJob example 7f803d70-c533-469f-8431-e378f3e97123

```

Note that the imported state may not be identical to your resource definition, due to some attributes missing from the API response, security or some other reason. The missing attributes include `conf`, `rows` and `schema`. It is generally recommended to run `terraform plan` after importing a resource. You can then decide if changes should be applied to the resource, or the resource definition should be updated to align with the resource. Also you can ignore changes as below.

    resource "huaweicloud_dli_sql_job" "test" {
      ...

      lifecycle {
        ignore_changes = [
          conf, rows, schema
        ]
      }
    }

func GetSqlJob

func GetSqlJob(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *SqlJobState, opts ...pulumi.ResourceOption) (*SqlJob, error)

GetSqlJob gets an existing SqlJob resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewSqlJob

func NewSqlJob(ctx *pulumi.Context,
	name string, args *SqlJobArgs, opts ...pulumi.ResourceOption) (*SqlJob, error)

NewSqlJob registers a new resource with the given unique name, arguments, and options.

func (*SqlJob) ElementType

func (*SqlJob) ElementType() reflect.Type

func (*SqlJob) ToSqlJobOutput

func (i *SqlJob) ToSqlJobOutput() SqlJobOutput

func (*SqlJob) ToSqlJobOutputWithContext

func (i *SqlJob) ToSqlJobOutputWithContext(ctx context.Context) SqlJobOutput

type SqlJobArgs

type SqlJobArgs struct {
	// Specifies the configuration parameters for the SQL job. Changing this parameter
	// will create a new resource. Structure is documented below.
	Conf SqlJobConfPtrInput
	// Specifies the database where the SQL is executed. This argument does
	// not need to be configured during database creation. Changing this parameter will create a new resource.
	DatabaseName pulumi.StringPtrInput
	// Specifies the queue to which the job is to be submitted.
	// Changing this parameter will create a new resource.
	QueueName pulumi.StringPtrInput
	// Specifies the region in which to create the DLI table resource. If omitted,
	// the provider-level region will be used. Changing this parameter will create a new resource.
	Region pulumi.StringPtrInput
	// Specifies SQL statement that you want to execute.
	// Changing this parameter will create a new resource.
	Sql pulumi.StringInput
	// Specifies label of a Job. Changing this parameter will create a new resource.
	Tags pulumi.StringMapInput
}

The set of arguments for constructing a SqlJob resource.

func (SqlJobArgs) ElementType

func (SqlJobArgs) ElementType() reflect.Type

type SqlJobArray

type SqlJobArray []SqlJobInput

func (SqlJobArray) ElementType

func (SqlJobArray) ElementType() reflect.Type

func (SqlJobArray) ToSqlJobArrayOutput

func (i SqlJobArray) ToSqlJobArrayOutput() SqlJobArrayOutput

func (SqlJobArray) ToSqlJobArrayOutputWithContext

func (i SqlJobArray) ToSqlJobArrayOutputWithContext(ctx context.Context) SqlJobArrayOutput

type SqlJobArrayInput

type SqlJobArrayInput interface {
	pulumi.Input

	ToSqlJobArrayOutput() SqlJobArrayOutput
	ToSqlJobArrayOutputWithContext(context.Context) SqlJobArrayOutput
}

SqlJobArrayInput is an input type that accepts SqlJobArray and SqlJobArrayOutput values. You can construct a concrete instance of `SqlJobArrayInput` via:

SqlJobArray{ SqlJobArgs{...} }

type SqlJobArrayOutput

type SqlJobArrayOutput struct{ *pulumi.OutputState }

func (SqlJobArrayOutput) ElementType

func (SqlJobArrayOutput) ElementType() reflect.Type

func (SqlJobArrayOutput) Index

func (SqlJobArrayOutput) ToSqlJobArrayOutput

func (o SqlJobArrayOutput) ToSqlJobArrayOutput() SqlJobArrayOutput

func (SqlJobArrayOutput) ToSqlJobArrayOutputWithContext

func (o SqlJobArrayOutput) ToSqlJobArrayOutputWithContext(ctx context.Context) SqlJobArrayOutput

type SqlJobConf

type SqlJobConf struct {
	// Sets the job running timeout interval. If the timeout interval
	// expires, the job is canceled. Unit: `ms`. Changing this parameter will create a new resource.
	DliSqlJobTimeout *int `pulumi:"dliSqlJobTimeout"`
	// Specifies whether DDL and DCL statements are executed
	// asynchronously. The value true indicates that asynchronous execution is enabled. Default value is `false`.
	// Changing this parameter will create a new resource.
	DliSqlSqlasyncEnabled *bool `pulumi:"dliSqlSqlasyncEnabled"`
	// Maximum size of the table that can be broadcast to all worker
	// nodes when a join is executed. You can set this parameter to -1 to disable the broadcast.
	// Default value is `209715200`. Changing this parameter will create a new resource.
	SparkSqlAutoBroadcastJoinThreshold *int `pulumi:"sparkSqlAutoBroadcastJoinThreshold"`
	// Path of bad records. Changing this parameter will create
	// a new resource.
	SparkSqlBadRecordsPath *string `pulumi:"sparkSqlBadRecordsPath"`
	// In dynamic mode, Spark does not delete
	// the previous partitions and only overwrites the partitions without data during execution. Default value is `false`.
	// Changing this parameter will create a new resource.
	SparkSqlDynamicPartitionOverwriteEnabled *bool `pulumi:"sparkSqlDynamicPartitionOverwriteEnabled"`
	// Maximum number of bytes to be packed into a
	// single partition when a file is read. Default value is `134217728`. Changing this parameter will create a new
	// resource.
	SparkSqlFilesMaxPartitionBytes *int `pulumi:"sparkSqlFilesMaxPartitionBytes"`
	// Maximum number of records to be written
	// into a single file. If the value is zero or negative, there is no limit. Default value is `0`.
	// Changing this parameter will create a new resource.
	SparkSqlMaxRecordsPerFile *int `pulumi:"sparkSqlMaxRecordsPerFile"`
	// Default number of partitions used to shuffle
	// data for join or aggregation. Default value is `4096`. Changing this parameter will create a new resource.
	SparkSqlShufflePartitions *int `pulumi:"sparkSqlShufflePartitions"`
}

type SqlJobConfArgs

type SqlJobConfArgs struct {
	// Sets the job running timeout interval. If the timeout interval
	// expires, the job is canceled. Unit: `ms`. Changing this parameter will create a new resource.
	DliSqlJobTimeout pulumi.IntPtrInput `pulumi:"dliSqlJobTimeout"`
	// Specifies whether DDL and DCL statements are executed
	// asynchronously. The value true indicates that asynchronous execution is enabled. Default value is `false`.
	// Changing this parameter will create a new resource.
	DliSqlSqlasyncEnabled pulumi.BoolPtrInput `pulumi:"dliSqlSqlasyncEnabled"`
	// Maximum size of the table that can be broadcast to all worker
	// nodes when a join is executed. You can set this parameter to -1 to disable the broadcast.
	// Default value is `209715200`. Changing this parameter will create a new resource.
	SparkSqlAutoBroadcastJoinThreshold pulumi.IntPtrInput `pulumi:"sparkSqlAutoBroadcastJoinThreshold"`
	// Path of bad records. Changing this parameter will create
	// a new resource.
	SparkSqlBadRecordsPath pulumi.StringPtrInput `pulumi:"sparkSqlBadRecordsPath"`
	// In dynamic mode, Spark does not delete
	// the previous partitions and only overwrites the partitions without data during execution. Default value is `false`.
	// Changing this parameter will create a new resource.
	SparkSqlDynamicPartitionOverwriteEnabled pulumi.BoolPtrInput `pulumi:"sparkSqlDynamicPartitionOverwriteEnabled"`
	// Maximum number of bytes to be packed into a
	// single partition when a file is read. Default value is `134217728`. Changing this parameter will create a new
	// resource.
	SparkSqlFilesMaxPartitionBytes pulumi.IntPtrInput `pulumi:"sparkSqlFilesMaxPartitionBytes"`
	// Maximum number of records to be written
	// into a single file. If the value is zero or negative, there is no limit. Default value is `0`.
	// Changing this parameter will create a new resource.
	SparkSqlMaxRecordsPerFile pulumi.IntPtrInput `pulumi:"sparkSqlMaxRecordsPerFile"`
	// Default number of partitions used to shuffle
	// data for join or aggregation. Default value is `4096`. Changing this parameter will create a new resource.
	SparkSqlShufflePartitions pulumi.IntPtrInput `pulumi:"sparkSqlShufflePartitions"`
}
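
As a sketch (inside a `pulumi.Run` callback, with the same imports as the SQL job example above), these configuration items could be attached to a job like this; the queue, database, statement and numeric values are illustrative:

```go
// Run a SQL statement with a 5-minute timeout and a custom shuffle partition count.
_, err := Dli.NewSqlJob(ctx, "configured", &Dli.SqlJobArgs{
	Sql:          pulumi.String("SELECT 1"),
	DatabaseName: pulumi.String("my_database"),
	QueueName:    pulumi.String("my_dli_queue"),
	Conf: &Dli.SqlJobConfArgs{
		DliSqlJobTimeout:          pulumi.Int(300000), // milliseconds
		SparkSqlShufflePartitions: pulumi.Int(200),
	},
})
if err != nil {
	return err
}
```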

func (SqlJobConfArgs) ElementType

func (SqlJobConfArgs) ElementType() reflect.Type

func (SqlJobConfArgs) ToSqlJobConfOutput

func (i SqlJobConfArgs) ToSqlJobConfOutput() SqlJobConfOutput

func (SqlJobConfArgs) ToSqlJobConfOutputWithContext

func (i SqlJobConfArgs) ToSqlJobConfOutputWithContext(ctx context.Context) SqlJobConfOutput

func (SqlJobConfArgs) ToSqlJobConfPtrOutput

func (i SqlJobConfArgs) ToSqlJobConfPtrOutput() SqlJobConfPtrOutput

func (SqlJobConfArgs) ToSqlJobConfPtrOutputWithContext

func (i SqlJobConfArgs) ToSqlJobConfPtrOutputWithContext(ctx context.Context) SqlJobConfPtrOutput

type SqlJobConfInput

type SqlJobConfInput interface {
	pulumi.Input

	ToSqlJobConfOutput() SqlJobConfOutput
	ToSqlJobConfOutputWithContext(context.Context) SqlJobConfOutput
}

SqlJobConfInput is an input type that accepts SqlJobConfArgs and SqlJobConfOutput values. You can construct a concrete instance of `SqlJobConfInput` via:

SqlJobConfArgs{...}

type SqlJobConfOutput

type SqlJobConfOutput struct{ *pulumi.OutputState }

func (SqlJobConfOutput) DliSqlJobTimeout

func (o SqlJobConfOutput) DliSqlJobTimeout() pulumi.IntPtrOutput

Sets the job running timeout interval. If the timeout interval expires, the job is canceled. Unit: `ms`. Changing this parameter will create a new resource.

func (SqlJobConfOutput) DliSqlSqlasyncEnabled

func (o SqlJobConfOutput) DliSqlSqlasyncEnabled() pulumi.BoolPtrOutput

Specifies whether DDL and DCL statements are executed asynchronously. The value true indicates that asynchronous execution is enabled. Default value is `false`. Changing this parameter will create a new resource.

func (SqlJobConfOutput) ElementType

func (SqlJobConfOutput) ElementType() reflect.Type

func (SqlJobConfOutput) SparkSqlAutoBroadcastJoinThreshold

func (o SqlJobConfOutput) SparkSqlAutoBroadcastJoinThreshold() pulumi.IntPtrOutput

Maximum size of the table that can be broadcast to all worker nodes when a join is executed. You can set this parameter to -1 to disable the broadcast. Default value is `209715200`. Changing this parameter will create a new resource.

func (SqlJobConfOutput) SparkSqlBadRecordsPath

func (o SqlJobConfOutput) SparkSqlBadRecordsPath() pulumi.StringPtrOutput

Path of bad records. Changing this parameter will create a new resource.

func (SqlJobConfOutput) SparkSqlDynamicPartitionOverwriteEnabled

func (o SqlJobConfOutput) SparkSqlDynamicPartitionOverwriteEnabled() pulumi.BoolPtrOutput

In dynamic mode, Spark does not delete the previous partitions and only overwrites the partitions without data during execution. Default value is `false`. Changing this parameter will create a new resource.

func (SqlJobConfOutput) SparkSqlFilesMaxPartitionBytes

func (o SqlJobConfOutput) SparkSqlFilesMaxPartitionBytes() pulumi.IntPtrOutput

Maximum number of bytes to be packed into a single partition when a file is read. Default value is `134217728`. Changing this parameter will create a new resource.

func (SqlJobConfOutput) SparkSqlMaxRecordsPerFile

func (o SqlJobConfOutput) SparkSqlMaxRecordsPerFile() pulumi.IntPtrOutput

Maximum number of records to be written into a single file. If the value is zero or negative, there is no limit. Default value is `0`. Changing this parameter will create a new resource.

func (SqlJobConfOutput) SparkSqlShufflePartitions

func (o SqlJobConfOutput) SparkSqlShufflePartitions() pulumi.IntPtrOutput

Default number of partitions used to shuffle data for join or aggregation. Default value is `4096`. Changing this parameter will create a new resource.

func (SqlJobConfOutput) ToSqlJobConfOutput

func (o SqlJobConfOutput) ToSqlJobConfOutput() SqlJobConfOutput

func (SqlJobConfOutput) ToSqlJobConfOutputWithContext

func (o SqlJobConfOutput) ToSqlJobConfOutputWithContext(ctx context.Context) SqlJobConfOutput

func (SqlJobConfOutput) ToSqlJobConfPtrOutput

func (o SqlJobConfOutput) ToSqlJobConfPtrOutput() SqlJobConfPtrOutput

func (SqlJobConfOutput) ToSqlJobConfPtrOutputWithContext

func (o SqlJobConfOutput) ToSqlJobConfPtrOutputWithContext(ctx context.Context) SqlJobConfPtrOutput

type SqlJobConfPtrInput

type SqlJobConfPtrInput interface {
	pulumi.Input

	ToSqlJobConfPtrOutput() SqlJobConfPtrOutput
	ToSqlJobConfPtrOutputWithContext(context.Context) SqlJobConfPtrOutput
}

SqlJobConfPtrInput is an input type that accepts SqlJobConfArgs, SqlJobConfPtr and SqlJobConfPtrOutput values. You can construct a concrete instance of `SqlJobConfPtrInput` via:

        SqlJobConfArgs{...}

or:

        nil

func SqlJobConfPtr

func SqlJobConfPtr(v *SqlJobConfArgs) SqlJobConfPtrInput

type SqlJobConfPtrOutput

type SqlJobConfPtrOutput struct{ *pulumi.OutputState }

func (SqlJobConfPtrOutput) DliSqlJobTimeout

func (o SqlJobConfPtrOutput) DliSqlJobTimeout() pulumi.IntPtrOutput

Sets the job running timeout interval. If the timeout interval expires, the job is canceled. Unit: `ms`. Changing this parameter will create a new resource.

func (SqlJobConfPtrOutput) DliSqlSqlasyncEnabled

func (o SqlJobConfPtrOutput) DliSqlSqlasyncEnabled() pulumi.BoolPtrOutput

Specifies whether DDL and DCL statements are executed asynchronously. The value true indicates that asynchronous execution is enabled. Default value is `false`. Changing this parameter will create a new resource.

func (SqlJobConfPtrOutput) Elem

func (SqlJobConfPtrOutput) ElementType

func (SqlJobConfPtrOutput) ElementType() reflect.Type

func (SqlJobConfPtrOutput) SparkSqlAutoBroadcastJoinThreshold

func (o SqlJobConfPtrOutput) SparkSqlAutoBroadcastJoinThreshold() pulumi.IntPtrOutput

Maximum size of the table that can be broadcast to all worker nodes when a join is executed. You can set this parameter to -1 to disable the broadcast. Default value is `209715200`. Changing this parameter will create a new resource.

func (SqlJobConfPtrOutput) SparkSqlBadRecordsPath

func (o SqlJobConfPtrOutput) SparkSqlBadRecordsPath() pulumi.StringPtrOutput

Path of bad records. Changing this parameter will create a new resource.

func (SqlJobConfPtrOutput) SparkSqlDynamicPartitionOverwriteEnabled

func (o SqlJobConfPtrOutput) SparkSqlDynamicPartitionOverwriteEnabled() pulumi.BoolPtrOutput

In dynamic mode, Spark does not delete the previous partitions and only overwrites the partitions without data during execution. Default value is `false`. Changing this parameter will create a new resource.

func (SqlJobConfPtrOutput) SparkSqlFilesMaxPartitionBytes

func (o SqlJobConfPtrOutput) SparkSqlFilesMaxPartitionBytes() pulumi.IntPtrOutput

Maximum number of bytes to be packed into a single partition when a file is read. Default value is `134217728`. Changing this parameter will create a new resource.

func (SqlJobConfPtrOutput) SparkSqlMaxRecordsPerFile

func (o SqlJobConfPtrOutput) SparkSqlMaxRecordsPerFile() pulumi.IntPtrOutput

Maximum number of records to be written into a single file. If the value is zero or negative, there is no limit. Default value is `0`. Changing this parameter will create a new resource.

func (SqlJobConfPtrOutput) SparkSqlShufflePartitions

func (o SqlJobConfPtrOutput) SparkSqlShufflePartitions() pulumi.IntPtrOutput

Default number of partitions used to shuffle data for join or aggregation. Default value is `4096`. Changing this parameter will create a new resource.

func (SqlJobConfPtrOutput) ToSqlJobConfPtrOutput

func (o SqlJobConfPtrOutput) ToSqlJobConfPtrOutput() SqlJobConfPtrOutput

func (SqlJobConfPtrOutput) ToSqlJobConfPtrOutputWithContext

func (o SqlJobConfPtrOutput) ToSqlJobConfPtrOutputWithContext(ctx context.Context) SqlJobConfPtrOutput

type SqlJobInput

type SqlJobInput interface {
	pulumi.Input

	ToSqlJobOutput() SqlJobOutput
	ToSqlJobOutputWithContext(ctx context.Context) SqlJobOutput
}

type SqlJobMap

type SqlJobMap map[string]SqlJobInput

func (SqlJobMap) ElementType

func (SqlJobMap) ElementType() reflect.Type

func (SqlJobMap) ToSqlJobMapOutput

func (i SqlJobMap) ToSqlJobMapOutput() SqlJobMapOutput

func (SqlJobMap) ToSqlJobMapOutputWithContext

func (i SqlJobMap) ToSqlJobMapOutputWithContext(ctx context.Context) SqlJobMapOutput

type SqlJobMapInput

type SqlJobMapInput interface {
	pulumi.Input

	ToSqlJobMapOutput() SqlJobMapOutput
	ToSqlJobMapOutputWithContext(context.Context) SqlJobMapOutput
}

SqlJobMapInput is an input type that accepts SqlJobMap and SqlJobMapOutput values. You can construct a concrete instance of `SqlJobMapInput` via:

SqlJobMap{ "key": SqlJobArgs{...} }

type SqlJobMapOutput

type SqlJobMapOutput struct{ *pulumi.OutputState }

func (SqlJobMapOutput) ElementType

func (SqlJobMapOutput) ElementType() reflect.Type

func (SqlJobMapOutput) MapIndex

func (SqlJobMapOutput) ToSqlJobMapOutput

func (o SqlJobMapOutput) ToSqlJobMapOutput() SqlJobMapOutput

func (SqlJobMapOutput) ToSqlJobMapOutputWithContext

func (o SqlJobMapOutput) ToSqlJobMapOutputWithContext(ctx context.Context) SqlJobMapOutput

type SqlJobOutput

type SqlJobOutput struct{ *pulumi.OutputState }

func (SqlJobOutput) Conf

Specifies the configuration parameters for the SQL job. Changing this parameter will create a new resource. Structure is documented below.

func (SqlJobOutput) DatabaseName

func (o SqlJobOutput) DatabaseName() pulumi.StringPtrOutput

Specifies the database where the SQL is executed. This argument does not need to be configured during database creation. Changing this parameter will create a new resource.

func (SqlJobOutput) Duration

func (o SqlJobOutput) Duration() pulumi.IntOutput

Job running duration (unit: millisecond).

func (SqlJobOutput) ElementType

func (SqlJobOutput) ElementType() reflect.Type

func (SqlJobOutput) JobType

func (o SqlJobOutput) JobType() pulumi.StringOutput

Type of a job. Includes **DDL**, **DCL**, **IMPORT**, **EXPORT**, **QUERY**, **INSERT**, **DATA_MIGRATION**, **UPDATE**, **DELETE**, **RESTART_QUEUE** and **SCALE_QUEUE**.

func (SqlJobOutput) Owner

func (o SqlJobOutput) Owner() pulumi.StringOutput

User who submits a job.

func (SqlJobOutput) QueueName

func (o SqlJobOutput) QueueName() pulumi.StringOutput

Specifies the queue to which the job is to be submitted. Changing this parameter will create a new resource.

func (SqlJobOutput) Region

func (o SqlJobOutput) Region() pulumi.StringOutput

Specifies the region in which to create the DLI table resource. If omitted, the provider-level region will be used. Changing this parameter will create a new resource.

func (SqlJobOutput) Rows

When the statement type is DDL, results of the DDL are displayed.

func (SqlJobOutput) Schemas

When the statement type is DDL, the column name and type of DDL are displayed.

func (SqlJobOutput) Sql

Specifies SQL statement that you want to execute. Changing this parameter will create a new resource.

func (SqlJobOutput) StartTime

func (o SqlJobOutput) StartTime() pulumi.StringOutput

Time when a job is started, in RFC-3339 format. e.g. `2019-10-12T07:20:50.52Z`

func (SqlJobOutput) Status

func (o SqlJobOutput) Status() pulumi.StringOutput

Status of a job, including **RUNNING**, **SCALING**, **LAUNCHING**, **FINISHED**, **FAILED**, and **CANCELLED.**

func (SqlJobOutput) Tags

Specifies label of a Job. Changing this parameter will create a new resource.

func (SqlJobOutput) ToSqlJobOutput

func (o SqlJobOutput) ToSqlJobOutput() SqlJobOutput

func (SqlJobOutput) ToSqlJobOutputWithContext

func (o SqlJobOutput) ToSqlJobOutputWithContext(ctx context.Context) SqlJobOutput

type SqlJobState

type SqlJobState struct {
	// Specifies the configuration parameters for the SQL job. Changing this parameter
	// will create a new resource. Structure is documented below.
	Conf SqlJobConfPtrInput
	// Specifies the database where the SQL is executed. This argument does
	// not need to be configured during database creation. Changing this parameter will create a new resource.
	DatabaseName pulumi.StringPtrInput
	// Job running duration (unit: millisecond).
	Duration pulumi.IntPtrInput
	// Type of a job. Includes **DDL**, **DCL**, **IMPORT**, **EXPORT**, **QUERY**, **INSERT**,
	// **DATA_MIGRATION**, **UPDATE**, **DELETE**, **RESTART_QUEUE** and **SCALE_QUEUE**.
	JobType pulumi.StringPtrInput
	// User who submits a job.
	Owner pulumi.StringPtrInput
	// Specifies the queue to which the job is to be submitted.
	// Changing this parameter will create a new resource.
	QueueName pulumi.StringPtrInput
	// Specifies the region in which to create the DLI table resource. If omitted,
	// the provider-level region will be used. Changing this parameter will create a new resource.
	Region pulumi.StringPtrInput
	// When the statement type is DDL, results of the DDL are displayed.
	Rows pulumi.StringArrayArrayInput
	// When the statement type is DDL, the column name and type of DDL are displayed.
	Schemas pulumi.StringMapArrayInput
	// Specifies SQL statement that you want to execute.
	// Changing this parameter will create a new resource.
	Sql pulumi.StringPtrInput
	// Time when a job is started, in RFC-3339 format. e.g. `2019-10-12T07:20:50.52Z`
	StartTime pulumi.StringPtrInput
	// Status of a job, including **RUNNING**, **SCALING**, **LAUNCHING**, **FINISHED**, **FAILED**,
	// and **CANCELLED.**
	Status pulumi.StringPtrInput
	// Specifies label of a Job. Changing this parameter will create a new resource.
	Tags pulumi.StringMapInput
}

func (SqlJobState) ElementType

func (SqlJobState) ElementType() reflect.Type

type Table

type Table struct {
	pulumi.CustomResourceState

	// Specifies the storage path of the data to be imported to the OBS table.
	// Changing this parameter will create a new resource.
	// > If you need to import data stored in OBS to the OBS table, set this parameter to the path of a folder. If the
	// table creation path is a file, data fails to be imported. The path must be an OBS path and must begin with `obs`.
	BucketLocation pulumi.StringOutput `pulumi:"bucketLocation"`
	// Specifies Columns of the new table. Structure is documented below.
	// Changing this parameter will create a new resource.
	Columns TableColumnArrayOutput `pulumi:"columns"`
	// Specifies type of the data to be added to the OBS table.
	// The options: parquet, orc, csv, json, carbon, and avro. Changing this parameter will create a new resource.
	DataFormat pulumi.StringOutput `pulumi:"dataFormat"`
	// Specifies the data storage location. Changing this parameter will create
	// a new resource. The options are as follows:
	// + **DLI**: Data stored in DLI tables is applicable to delay-sensitive services, such as interactive queries.
	// + **OBS**: Data stored in OBS tables is applicable to delay-insensitive services, such as historical data statistics
	//   and analysis.
	DataLocation pulumi.StringOutput `pulumi:"dataLocation"`
	// Specifies the database name which the table belongs to.
	// Changing this parameter will create a new resource.
	DatabaseName pulumi.StringOutput `pulumi:"databaseName"`
	// Specifies the date format. `yyyy-MM-dd` is used by default. Only
	// data in CSV and JSON files has this attribute. Changing this parameter will create a new resource.
	DateFormat pulumi.StringOutput `pulumi:"dateFormat"`
	// Specifies data delimiter. Only data in CSV files has this
	// attribute. Changing this parameter will create a new resource.
	Delimiter pulumi.StringOutput `pulumi:"delimiter"`
	// Specifies the description of the table. Changing this parameter will
	// create a new resource.
	Description pulumi.StringPtrOutput `pulumi:"description"`
	// Specifies escape character. Backslashes (`\\`) are used by
	// default. Only data in CSV files has this attribute. Changing this parameter will create a new resource.
	EscapeChar pulumi.StringOutput `pulumi:"escapeChar"`
	// Specifies the table name. Changing this parameter will create a new
	// resource.
	Name pulumi.StringOutput `pulumi:"name"`
	// Specifies the reference character. Double quotation marks (`\"`)
	// are used by default. Only data in CSV files has this attribute. Changing this parameter will create a new resource.
	QuoteChar pulumi.StringOutput `pulumi:"quoteChar"`
	// Specifies the region in which to create the dli table resource. If omitted,
	// the provider-level region will be used. Changing this parameter will create a new resource.
	Region pulumi.StringOutput `pulumi:"region"`
	// Specifies the timestamp format. `yyyy-MM-dd HH:mm:ss` is used by default.
	// Only data in CSV and JSON files has this attribute. Changing this parameter will create a new resource.
	TimestampFormat pulumi.StringOutput `pulumi:"timestampFormat"`
	// Specifies whether the table header is included in the data file.
	// Only data in CSV files has this attribute. Changing this parameter will create a new resource.
	WithColumnHeader pulumi.BoolOutput `pulumi:"withColumnHeader"`
}

Manages DLI Table resource within HuaweiCloud.

## Example Usage ### Create a Table

```go package main

import (

"github.com/huaweicloud/pulumi-huaweicloud/sdk/go/huaweicloud/Dli"
"github.com/pulumi/pulumi-huaweicloud/sdk/go/huaweicloud/Dli"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		testDatabase, err := Dli.NewDatabase(ctx, "testDatabase", nil)
		if err != nil {
			return err
		}
		_, err = Dli.NewTable(ctx, "testTable", &Dli.TableArgs{
			DatabaseName: testDatabase.Name,
			DataLocation: pulumi.String("DLI"),
			Description:  pulumi.String("SQL table_1 description"),
			Columns: Dli.TableColumnArray{
				&Dli.TableColumnArgs{
					Name:        pulumi.String("column_1"),
					Type:        pulumi.String("string"),
					Description: pulumi.String("the first column"),
				},
				&Dli.TableColumnArgs{
					Name:        pulumi.String("column_2"),
					Type:        pulumi.String("string"),
					Description: pulumi.String("the second column"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## Import

DLI table can be imported by `id`. The ID is composed of the database name to which the table belongs and the table name, separated by a slash. For example,

```sh

$ pulumi import huaweicloud:Dli/table:Table example <database_name>/<table_name>

```

func GetTable

func GetTable(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *TableState, opts ...pulumi.ResourceOption) (*Table, error)

GetTable gets an existing Table resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewTable

func NewTable(ctx *pulumi.Context,
	name string, args *TableArgs, opts ...pulumi.ResourceOption) (*Table, error)

NewTable registers a new resource with the given unique name, arguments, and options.

func (*Table) ElementType

func (*Table) ElementType() reflect.Type

func (*Table) ToTableOutput

func (i *Table) ToTableOutput() TableOutput

func (*Table) ToTableOutputWithContext

func (i *Table) ToTableOutputWithContext(ctx context.Context) TableOutput

type TableArgs

type TableArgs struct {
	// Specifies the storage path of the data to be imported to the OBS table.
	// Changing this parameter will create a new resource.
	// > If you need to import data stored in OBS to the OBS table, set this parameter to the path of a folder. If the
	// table creation path is a file, data fails to be imported. The path must be an OBS path and must begin with `obs`.
	BucketLocation pulumi.StringPtrInput
	// Specifies Columns of the new table. Structure is documented below.
	// Changing this parameter will create a new resource.
	Columns TableColumnArrayInput
	// Specifies type of the data to be added to the OBS table.
	// The options: parquet, orc, csv, json, carbon, and avro. Changing this parameter will create a new resource.
	DataFormat pulumi.StringPtrInput
	// Specifies the data storage location. Changing this parameter will create
	// a new resource. The options are as follows:
	// + **DLI**: Data stored in DLI tables is applicable to delay-sensitive services, such as interactive queries.
	// + **OBS**: Data stored in OBS tables is applicable to delay-insensitive services, such as historical data statistics
	//   and analysis.
	DataLocation pulumi.StringInput
	// Specifies the database name which the table belongs to.
	// Changing this parameter will create a new resource.
	DatabaseName pulumi.StringInput
	// Specifies the date format. `yyyy-MM-dd` is used by default. Only
	// data in CSV and JSON files has this attribute. Changing this parameter will create a new resource.
	DateFormat pulumi.StringPtrInput
	// Specifies data delimiter. Only data in CSV files has this
	// attribute. Changing this parameter will create a new resource.
	Delimiter pulumi.StringPtrInput
	// Specifies the description of the table. Changing this parameter will
	// create a new resource.
	Description pulumi.StringPtrInput
	// Specifies escape character. Backslashes (`\\`) are used by
	// default. Only data in CSV files has this attribute. Changing this parameter will create a new resource.
	EscapeChar pulumi.StringPtrInput
	// Specifies the table name. Changing this parameter will create a new
	// resource.
	Name pulumi.StringPtrInput
	// Specifies the reference character. Double quotation marks (`\"`)
	// are used by default. Only data in CSV files has this attribute. Changing this parameter will create a new resource.
	QuoteChar pulumi.StringPtrInput
	// Specifies the region in which to create the dli table resource. If omitted,
	// the provider-level region will be used. Changing this parameter will create a new resource.
	Region pulumi.StringPtrInput
	// Specifies the timestamp format. `yyyy-MM-dd HH:mm:ss` is used by default.
	// Only data in CSV and JSON files has this attribute. Changing this parameter will create a new resource.
	TimestampFormat pulumi.StringPtrInput
	// Specifies whether the table header is included in the data file.
	// Only data in CSV files has this attribute. Changing this parameter will create a new resource.
	WithColumnHeader pulumi.BoolPtrInput
}

The set of arguments for constructing a Table resource.
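
As a sketch complementing the DLI-table example above (inside a `pulumi.Run` callback with the same imports), an OBS-backed CSV table might be declared as follows; the bucket path, database and column names are placeholders:

```go
// An OBS-backed CSV table; only CSV-specific options are shown.
_, err := Dli.NewTable(ctx, "csvTable", &Dli.TableArgs{
	DatabaseName:     pulumi.String("my_database"),
	DataLocation:     pulumi.String("OBS"),
	DataFormat:       pulumi.String("csv"),
	BucketLocation:   pulumi.String("obs://my-bucket/csv-data/"),
	Delimiter:        pulumi.String(","),
	WithColumnHeader: pulumi.Bool(true),
	Columns: Dli.TableColumnArray{
		&Dli.TableColumnArgs{
			Name: pulumi.String("id"),
			Type: pulumi.String("int"),
		},
		&Dli.TableColumnArgs{
			Name: pulumi.String("name"),
			Type: pulumi.String("string"),
		},
	},
})
if err != nil {
	return err
}
```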

func (TableArgs) ElementType

func (TableArgs) ElementType() reflect.Type

type TableArray

type TableArray []TableInput

func (TableArray) ElementType

func (TableArray) ElementType() reflect.Type

func (TableArray) ToTableArrayOutput

func (i TableArray) ToTableArrayOutput() TableArrayOutput

func (TableArray) ToTableArrayOutputWithContext

func (i TableArray) ToTableArrayOutputWithContext(ctx context.Context) TableArrayOutput

type TableArrayInput

type TableArrayInput interface {
	pulumi.Input

	ToTableArrayOutput() TableArrayOutput
	ToTableArrayOutputWithContext(context.Context) TableArrayOutput
}

TableArrayInput is an input type that accepts TableArray and TableArrayOutput values. You can construct a concrete instance of `TableArrayInput` via:

TableArray{ TableArgs{...} }

type TableArrayOutput

type TableArrayOutput struct{ *pulumi.OutputState }

func (TableArrayOutput) ElementType

func (TableArrayOutput) ElementType() reflect.Type

func (TableArrayOutput) Index

func (o TableArrayOutput) Index(i pulumi.IntInput) TableOutput

func (TableArrayOutput) ToTableArrayOutput

func (o TableArrayOutput) ToTableArrayOutput() TableArrayOutput

func (TableArrayOutput) ToTableArrayOutputWithContext

func (o TableArrayOutput) ToTableArrayOutputWithContext(ctx context.Context) TableArrayOutput

type TableColumn

type TableColumn struct {
	// Specifies the description of the column. Changing this parameter will
	// create a new resource.
	Description *string `pulumi:"description"`
	// Specifies whether the column is a partition column. The value
	// `true` indicates a partition column, and the value `false` indicates a non-partition column. The default value
	// is `false`. Changing this parameter will create a new resource.
	IsPartition *bool `pulumi:"isPartition"`
	// Specifies the name of the column. Changing this parameter will create a new
	// resource.
	Name string `pulumi:"name"`
	// Specifies the data type of the column. Changing this parameter will create a new
	// resource.
	Type string `pulumi:"type"`
}

type TableColumnArgs

type TableColumnArgs struct {
	// Specifies the description of the column. Changing this parameter will
	// create a new resource.
	Description pulumi.StringPtrInput `pulumi:"description"`
	// Specifies whether the column is a partition column. The value
	// `true` indicates a partition column, and the value `false` indicates a non-partition column. The default value
	// is `false`. Changing this parameter will create a new resource.
	IsPartition pulumi.BoolPtrInput `pulumi:"isPartition"`
	// Specifies the name of the column. Changing this parameter will create a new
	// resource.
	Name pulumi.StringInput `pulumi:"name"`
	// Specifies the data type of the column. Changing this parameter will create a new
	// resource.
	Type pulumi.StringInput `pulumi:"type"`
}

func (TableColumnArgs) ElementType

func (TableColumnArgs) ElementType() reflect.Type

func (TableColumnArgs) ToTableColumnOutput

func (i TableColumnArgs) ToTableColumnOutput() TableColumnOutput

func (TableColumnArgs) ToTableColumnOutputWithContext

func (i TableColumnArgs) ToTableColumnOutputWithContext(ctx context.Context) TableColumnOutput

type TableColumnArray

type TableColumnArray []TableColumnInput

func (TableColumnArray) ElementType

func (TableColumnArray) ElementType() reflect.Type

func (TableColumnArray) ToTableColumnArrayOutput

func (i TableColumnArray) ToTableColumnArrayOutput() TableColumnArrayOutput

func (TableColumnArray) ToTableColumnArrayOutputWithContext

func (i TableColumnArray) ToTableColumnArrayOutputWithContext(ctx context.Context) TableColumnArrayOutput

type TableColumnArrayInput

type TableColumnArrayInput interface {
	pulumi.Input

	ToTableColumnArrayOutput() TableColumnArrayOutput
	ToTableColumnArrayOutputWithContext(context.Context) TableColumnArrayOutput
}

TableColumnArrayInput is an input type that accepts TableColumnArray and TableColumnArrayOutput values. You can construct a concrete instance of `TableColumnArrayInput` via:

TableColumnArray{ TableColumnArgs{...} }
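
For instance, a minimal sketch of a column list with one partition column; the column names and types are placeholders, and the resulting value satisfies TableColumnArrayInput, so it can be assigned to TableArgs.Columns.

```go
package main

import (
	"github.com/huaweicloud/pulumi-huaweicloud/sdk/go/huaweicloud/Dli"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	// Placeholder column names and types for this sketch.
	var columns Dli.TableColumnArrayInput = Dli.TableColumnArray{
		&Dli.TableColumnArgs{
			Name: pulumi.String("event_time"),
			Type: pulumi.String("timestamp"),
		},
		&Dli.TableColumnArgs{
			Name:        pulumi.String("dt"),
			Type:        pulumi.String("string"),
			IsPartition: pulumi.Bool(true), // marks dt as a partition column
		},
	}
	_ = columns
}
```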

type TableColumnArrayOutput

type TableColumnArrayOutput struct{ *pulumi.OutputState }

func (TableColumnArrayOutput) ElementType

func (TableColumnArrayOutput) ElementType() reflect.Type

func (TableColumnArrayOutput) Index

func (o TableColumnArrayOutput) Index(i pulumi.IntInput) TableColumnOutput

func (TableColumnArrayOutput) ToTableColumnArrayOutput

func (o TableColumnArrayOutput) ToTableColumnArrayOutput() TableColumnArrayOutput

func (TableColumnArrayOutput) ToTableColumnArrayOutputWithContext

func (o TableColumnArrayOutput) ToTableColumnArrayOutputWithContext(ctx context.Context) TableColumnArrayOutput

type TableColumnInput

type TableColumnInput interface {
	pulumi.Input

	ToTableColumnOutput() TableColumnOutput
	ToTableColumnOutputWithContext(context.Context) TableColumnOutput
}

TableColumnInput is an input type that accepts TableColumnArgs and TableColumnOutput values. You can construct a concrete instance of `TableColumnInput` via:

TableColumnArgs{...}

type TableColumnOutput

type TableColumnOutput struct{ *pulumi.OutputState }

func (TableColumnOutput) Description

func (o TableColumnOutput) Description() pulumi.StringPtrOutput

Specifies the description of the column. Changing this parameter will create a new resource.

func (TableColumnOutput) ElementType

func (TableColumnOutput) ElementType() reflect.Type

func (TableColumnOutput) IsPartition

func (o TableColumnOutput) IsPartition() pulumi.BoolPtrOutput

Specifies whether the column is a partition column. The value `true` indicates a partition column, and the value `false` indicates a non-partition column. The default value is `false`. Changing this parameter will create a new resource.

func (TableColumnOutput) Name

func (o TableColumnOutput) Name() pulumi.StringOutput

Specifies the name of the column. Changing this parameter will create a new resource.

func (TableColumnOutput) ToTableColumnOutput

func (o TableColumnOutput) ToTableColumnOutput() TableColumnOutput

func (TableColumnOutput) ToTableColumnOutputWithContext

func (o TableColumnOutput) ToTableColumnOutputWithContext(ctx context.Context) TableColumnOutput

func (TableColumnOutput) Type

func (o TableColumnOutput) Type() pulumi.StringOutput

Specifies the data type of the column. Changing this parameter will create a new resource.

type TableInput

type TableInput interface {
	pulumi.Input

	ToTableOutput() TableOutput
	ToTableOutputWithContext(ctx context.Context) TableOutput
}

type TableMap

type TableMap map[string]TableInput

func (TableMap) ElementType

func (TableMap) ElementType() reflect.Type

func (TableMap) ToTableMapOutput

func (i TableMap) ToTableMapOutput() TableMapOutput

func (TableMap) ToTableMapOutputWithContext

func (i TableMap) ToTableMapOutputWithContext(ctx context.Context) TableMapOutput

type TableMapInput

type TableMapInput interface {
	pulumi.Input

	ToTableMapOutput() TableMapOutput
	ToTableMapOutputWithContext(context.Context) TableMapOutput
}

TableMapInput is an input type that accepts TableMap and TableMapOutput values. You can construct a concrete instance of `TableMapInput` via:

TableMap{ "key": TableArgs{...} }
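
A sketch of keying a Table resource into a TableMap and indexing the resulting output. It assumes *Table satisfies TableInput and that MapIndex accepts a pulumi.StringInput key (the usual codegen pattern); the database name is a placeholder.

```go
package main

import (
	"github.com/huaweicloud/pulumi-huaweicloud/sdk/go/huaweicloud/Dli"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Arguments trimmed for this sketch; a real table usually also defines Columns.
		tbl, err := Dli.NewTable(ctx, "raw", &Dli.TableArgs{
			DatabaseName: pulumi.String("terraform_test"),
			DataLocation: pulumi.String("DLI"),
		})
		if err != nil {
			return err
		}
		// Key the resource into a TableMap and read it back by key via MapIndex.
		tables := Dli.TableMap{"raw": tbl}
		ctx.Export("rawTableName", tables.ToTableMapOutput().MapIndex(pulumi.String("raw")).Name())
		return nil
	})
}
```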

type TableMapOutput

type TableMapOutput struct{ *pulumi.OutputState }

func (TableMapOutput) ElementType

func (TableMapOutput) ElementType() reflect.Type

func (TableMapOutput) MapIndex

func (o TableMapOutput) MapIndex(k pulumi.StringInput) TableOutput

func (TableMapOutput) ToTableMapOutput

func (o TableMapOutput) ToTableMapOutput() TableMapOutput

func (TableMapOutput) ToTableMapOutputWithContext

func (o TableMapOutput) ToTableMapOutputWithContext(ctx context.Context) TableMapOutput

type TableOutput

type TableOutput struct{ *pulumi.OutputState }

func (TableOutput) BucketLocation

func (o TableOutput) BucketLocation() pulumi.StringOutput

Specifies the storage path of the data to be imported into the OBS table. The path must be on OBS and must begin with `obs`. Changing this parameter will create a new resource. > If you need to import data stored in OBS into the OBS table, set this parameter to the path of a folder; if the path points to a file, the import fails.

func (TableOutput) Columns

func (o TableOutput) Columns() TableColumnArrayOutput

Specifies the columns of the new table. The structure is documented below. Changing this parameter will create a new resource.

func (TableOutput) DataFormat

func (o TableOutput) DataFormat() pulumi.StringOutput

Specifies the format of the data to be added to the OBS table. The options are: parquet, orc, csv, json, carbon, and avro. Changing this parameter will create a new resource.

func (TableOutput) DataLocation

func (o TableOutput) DataLocation() pulumi.StringOutput

Specifies the data storage location. Changing this parameter will create a new resource. The options are as follows (see the sketch after the list):

  • **DLI**: Data stored in DLI tables is applicable to delay-sensitive services, such as interactive queries.
  • **OBS**: Data stored in OBS tables is applicable to delay-insensitive services, such as historical data statistics and analysis.
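
To illustrate the two options, a small sketch that registers one DLI-managed and one OBS-backed table; the database name and bucket path are placeholders, and it assumes the NewTable constructor generated for this resource.

```go
package main

import (
	"github.com/huaweicloud/pulumi-huaweicloud/sdk/go/huaweicloud/Dli"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// DLI-managed storage: DLI keeps the data itself, so no OBS path is set.
		if _, err := Dli.NewTable(ctx, "managed", &Dli.TableArgs{
			DatabaseName: pulumi.String("terraform_test"),
			DataLocation: pulumi.String("DLI"),
		}); err != nil {
			return err
		}
		// OBS-backed storage: BucketLocation points at a folder on OBS.
		_, err := Dli.NewTable(ctx, "external", &Dli.TableArgs{
			DatabaseName:   pulumi.String("terraform_test"),
			DataLocation:   pulumi.String("OBS"),
			DataFormat:     pulumi.String("csv"),
			BucketLocation: pulumi.String("obs://my-bucket/tables/sales/"),
		})
		return err
	})
}
```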

func (TableOutput) DatabaseName

func (o TableOutput) DatabaseName() pulumi.StringOutput

Specifies the name of the database to which the table belongs. Changing this parameter will create a new resource.

func (TableOutput) DateFormat

func (o TableOutput) DateFormat() pulumi.StringOutput

Specifies the date format. `yyyy-MM-dd` is used by default. Only data in CSV and JSON files has this attribute. Changing this parameter will create a new resource.

func (TableOutput) Delimiter

func (o TableOutput) Delimiter() pulumi.StringOutput

Specifies the data delimiter. Only data in CSV files has this attribute. Changing this parameter will create a new resource.

func (TableOutput) Description

func (o TableOutput) Description() pulumi.StringPtrOutput

Specifies the description of the table. Changing this parameter will create a new resource.

func (TableOutput) ElementType

func (TableOutput) ElementType() reflect.Type

func (TableOutput) EscapeChar

func (o TableOutput) EscapeChar() pulumi.StringOutput

Specifies the escape character. A backslash (`\\`) is used by default. Only data in CSV files has this attribute. Changing this parameter will create a new resource.

func (TableOutput) Name

func (o TableOutput) Name() pulumi.StringOutput

Specifies the table name. Changing this parameter will create a new resource.

func (TableOutput) QuoteChar

func (o TableOutput) QuoteChar() pulumi.StringOutput

Specifies the quotation character. Double quotation marks (`"`) are used by default. Only data in CSV files has this attribute. Changing this parameter will create a new resource.

func (TableOutput) Region

func (o TableOutput) Region() pulumi.StringOutput

Specifies the region in which to create the DLI table resource. If omitted, the provider-level region will be used. Changing this parameter will create a new resource.

func (TableOutput) TimestampFormat

func (o TableOutput) TimestampFormat() pulumi.StringOutput

Specifies the timestamp format. `yyyy-MM-dd HH:mm:ss` is used by default. Only data in CSV and JSON files has this attribute. Changing this parameter will create a new resource.

func (TableOutput) ToTableOutput

func (o TableOutput) ToTableOutput() TableOutput

func (TableOutput) ToTableOutputWithContext

func (o TableOutput) ToTableOutputWithContext(ctx context.Context) TableOutput

func (TableOutput) WithColumnHeader

func (o TableOutput) WithColumnHeader() pulumi.BoolOutput

Specifies whether the table header is included in the data file. Only data in CSV files has this attribute. Changing this parameter will create a new resource.
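
Taken together, these getters expose the resolved table properties as Pulumi outputs. A brief sketch that creates a table and exports two of them; the database name is a placeholder, the arguments are trimmed (a real table usually also defines Columns), and it assumes the NewTable constructor and *Table's ToTableOutput conversion generated for this resource.

```go
package main

import (
	"github.com/huaweicloud/pulumi-huaweicloud/sdk/go/huaweicloud/Dli"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		tbl, err := Dli.NewTable(ctx, "test", &Dli.TableArgs{
			DatabaseName: pulumi.String("terraform_test"),
			DataLocation: pulumi.String("DLI"),
		})
		if err != nil {
			return err
		}
		// Read resolved properties through the typed TableOutput getters.
		out := tbl.ToTableOutput()
		ctx.Export("tableName", out.Name())
		ctx.Export("tableRegion", out.Region())
		return nil
	})
}
```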

type TableState

type TableState struct {
	// Specifies the storage path of the data to be imported into the OBS table.
	// The path must be on OBS and must begin with `obs`. Changing this parameter will create a new resource.
	// > If you need to import data stored in OBS into the OBS table, set this parameter to the path of a folder.
	// If the path points to a file, the import fails.
	BucketLocation pulumi.StringPtrInput
	// Specifies the columns of the new table. The structure is documented below.
	// Changing this parameter will create a new resource.
	Columns TableColumnArrayInput
	// Specifies the format of the data to be added to the OBS table.
	// The options are: parquet, orc, csv, json, carbon, and avro. Changing this parameter will create a new resource.
	DataFormat pulumi.StringPtrInput
	// Specifies the data storage location. Changing this parameter will create
	// a new resource. The options are as follows:
	// + **DLI**: Data stored in DLI tables is applicable to delay-sensitive services, such as interactive queries.
	// + **OBS**: Data stored in OBS tables is applicable to delay-insensitive services, such as historical data statistics
	//   and analysis.
	DataLocation pulumi.StringPtrInput
	// Specifies the name of the database to which the table belongs.
	// Changing this parameter will create a new resource.
	DatabaseName pulumi.StringPtrInput
	// Specifies the date format. `yyyy-MM-dd` is used by default. Only
	// data in CSV and JSON files has this attribute. Changing this parameter will create a new resource.
	DateFormat pulumi.StringPtrInput
	// Specifies the data delimiter. Only data in CSV files has this
	// attribute. Changing this parameter will create a new resource.
	Delimiter pulumi.StringPtrInput
	// Specifies the description of the table. Changing this parameter will
	// create a new resource.
	Description pulumi.StringPtrInput
	// Specifies the escape character. A backslash (`\\`) is used by
	// default. Only data in CSV files has this attribute. Changing this parameter will create a new resource.
	EscapeChar pulumi.StringPtrInput
	// Specifies the table name. Changing this parameter will create a new
	// resource.
	Name pulumi.StringPtrInput
	// Specifies the quotation character. Double quotation marks (`"`)
	// are used by default. Only data in CSV files has this attribute. Changing this parameter will create a new resource.
	QuoteChar pulumi.StringPtrInput
	// Specifies the region in which to create the DLI table resource. If omitted,
	// the provider-level region will be used. Changing this parameter will create a new resource.
	Region pulumi.StringPtrInput
	// Specifies the timestamp format. `yyyy-MM-dd HH:mm:ss` is used by default.
	// Only data in CSV and JSON files has this attribute. Changing this parameter will create a new resource.
	TimestampFormat pulumi.StringPtrInput
	// Specifies whether the table header is included in the data file.
	// Only data in CSV files has this attribute. Changing this parameter will create a new resource.
	WithColumnHeader pulumi.BoolPtrInput
}

func (TableState) ElementType

func (TableState) ElementType() reflect.Type
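
TableState is typically passed to the package's GetTable lookup (generated alongside this resource; assumed here to have the standard ctx, name, id, state, opts signature) to adopt an already-provisioned table. A minimal sketch; the resource ID and database name below are placeholders.

```go
package main

import (
	"github.com/huaweicloud/pulumi-huaweicloud/sdk/go/huaweicloud/Dli"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Placeholder ID and state hint; the state argument may also be nil.
		_, err := Dli.GetTable(ctx, "existing", pulumi.ID("terraform_test/my_table"), &Dli.TableState{
			DatabaseName: pulumi.String("terraform_test"),
		})
		return err
	})
}
```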
