Documentation ¶
Index ¶
- Constants
- func CommonEnvironmentClientWithRealCommandExecutor() *common.DatabricksClient
- func CommonInstancePoolID() string
- func DataSourceClusterZones() *schema.Resource
- func DataSourceNodeType() *schema.Resource
- func DataSourceSparkVersion() *schema.Resource
- func ResourceCluster() *schema.Resource
- func ResourceClusterPolicy() *schema.Resource
- func ResourceInstancePool() *schema.Resource
- func ResourceJob() *schema.Resource
- func ResourcePipeline() *schema.Resource
- type AutoScale
- type Availability
- type AwsAttributes
- type AzureAttributes
- type AzureDiskVolumeType
- type Cluster
- type ClusterCloudProviderNodeInfo
- type ClusterEvent
- type ClusterEventType
- type ClusterID
- type ClusterInfo
- type ClusterLibraryList
- type ClusterLibraryStatuses
- type ClusterList
- type ClusterPoliciesAPI
- type ClusterPolicy
- type ClusterPolicyCreate
- type ClusterSize
- type ClusterState
- type ClustersAPI
- func (a ClustersAPI) Create(cluster Cluster) (info ClusterInfo, err error)
- func (a ClustersAPI) Edit(cluster Cluster) (info ClusterInfo, err error)
- func (a ClustersAPI) Events(eventsRequest EventsRequest) ([]ClusterEvent, error)
- func (a ClustersAPI) Get(clusterID string) (ci ClusterInfo, err error)
- func (a ClustersAPI) GetOrCreateRunningCluster(name string, custom ...Cluster) (c ClusterInfo, err error)
- func (a ClustersAPI) GetSmallestNodeType(r NodeTypeRequest) string
- func (a ClustersAPI) LatestSparkVersion(svr SparkVersionRequest) (string, error)
- func (a ClustersAPI) LatestSparkVersionOrDefault(svr SparkVersionRequest) string
- func (a ClustersAPI) List() ([]ClusterInfo, error)
- func (a ClustersAPI) ListNodeTypes() (l NodeTypeList, err error)
- func (a ClustersAPI) ListSparkVersions() (SparkVersionsList, error)
- func (a ClustersAPI) ListZones() (ZonesInfo, error)
- func (a ClustersAPI) PermanentDelete(clusterID string) error
- func (a ClustersAPI) Pin(clusterID string) error
- func (a ClustersAPI) Restart(clusterID string) error
- func (a ClustersAPI) Start(clusterID string) error
- func (a ClustersAPI) StartAndGetInfo(clusterID string) (ClusterInfo, error)
- func (a ClustersAPI) Terminate(clusterID string) error
- func (a ClustersAPI) Unpin(clusterID string) error
- type Command
- type CommandsAPI
- type Cran
- type CronSchedule
- type DbfsStorageInfo
- type DockerBasicAuth
- type DockerImage
- type EbsVolumeType
- type EmailNotifications
- type EventDetails
- type EventsRequest
- type EventsResponse
- type GcpAttributes
- type InitScriptStorageInfo
- type InstancePool
- type InstancePoolAndStats
- type InstancePoolAwsAttributes
- type InstancePoolAzureAttributes
- type InstancePoolDiskSpec
- type InstancePoolDiskType
- type InstancePoolList
- type InstancePoolStats
- type InstancePoolsAPI
- func (a InstancePoolsAPI) Create(instancePool InstancePool) (InstancePoolAndStats, error)
- func (a InstancePoolsAPI) Delete(instancePoolID string) error
- func (a InstancePoolsAPI) List() (ipl InstancePoolList, err error)
- func (a InstancePoolsAPI) Read(instancePoolID string) (ip InstancePool, err error)
- func (a InstancePoolsAPI) Update(ip InstancePool) error
- type Job
- type JobList
- type JobRun
- type JobRunsList
- type JobRunsListRequest
- type JobSettings
- type JobTaskSettings
- type JobsAPI
- func (a JobsAPI) Create(jobSettings JobSettings) (Job, error)
- func (a JobsAPI) Delete(id string) error
- func (a JobsAPI) List() (l JobList, err error)
- func (a JobsAPI) Read(id string) (job Job, err error)
- func (a JobsAPI) Restart(id string, timeout time.Duration) error
- func (a JobsAPI) RunNow(jobID int64) (int64, error)
- func (a JobsAPI) RunsCancel(runID int64, timeout time.Duration) error
- func (a JobsAPI) RunsGet(runID int64) (JobRun, error)
- func (a JobsAPI) RunsList(r JobRunsListRequest) (jrl JobRunsList, err error)
- func (a JobsAPI) Start(jobID int64, timeout time.Duration) error
- func (a JobsAPI) Update(id string, jobSettings JobSettings) error
- type LibrariesAPI
- type Library
- type LibraryStatus
- type LocalFileInfo
- type LogSyncStatus
- type Maven
- type NodeInstanceType
- type NodeType
- type NodeTypeList
- type NodeTypeRequest
- type NotebookTask
- type PipelineHealthStatus
- type PipelineState
- type PipelineTask
- type PyPi
- type PythonWheelTask
- type ResizeCause
- type RunParameters
- type RunState
- type S3StorageInfo
- type SortOrder
- type SparkJarTask
- type SparkNode
- type SparkNodeAwsAttributes
- type SparkPythonTask
- type SparkSubmitTask
- type SparkVersion
- type SparkVersionRequest
- type SparkVersionsList
- type StorageInfo
- type TaskDependency
- type TerminationReason
- type UpdateJobRequest
- type ZonesInfo
Constants ¶
const (
    // AwsAvailabilitySpot is spot instance type for clusters
    AwsAvailabilitySpot = "SPOT"
    // AwsAvailabilityOnDemand is OnDemand instance type for clusters
    AwsAvailabilityOnDemand = "ON_DEMAND"
    // AwsAvailabilitySpotWithFallback is Spot instance type for clusters with option
    // to fallback into on-demand if instance cannot be acquired
    AwsAvailabilitySpotWithFallback = "SPOT_WITH_FALLBACK"
)
const (
    // AzureAvailabilitySpot is spot instance type for clusters
    AzureAvailabilitySpot = "SPOT_AZURE"
    // AzureAvailabilityOnDemand is OnDemand instance type for clusters
    AzureAvailabilityOnDemand = "ON_DEMAND_AZURE"
    // AzureAvailabilitySpotWithFallback is Spot instance type for clusters with option
    // to fallback into on-demand if instance cannot be acquired
    AzureAvailabilitySpotWithFallback = "SPOT_WITH_FALLBACK_AZURE"
)
https://docs.microsoft.com/en-us/azure/databricks/dev-tools/api/latest/clusters#--azureavailability
const (
    // AzureDiskVolumeTypeStandard is for standard local redundant storage
    AzureDiskVolumeTypeStandard = "STANDARD_LRS"
    // AzureDiskVolumeTypePremium is for premium local redundant storage
    AzureDiskVolumeTypePremium = "PREMIUM_LRS"
)
const (
    // EbsVolumeTypeGeneralPurposeSsd is general purpose ssd (starts at 32 gb)
    EbsVolumeTypeGeneralPurposeSsd = "GENERAL_PURPOSE_SSD"
    // EbsVolumeTypeThroughputOptimizedHdd is throughput optimized hdd (starts at 500 gb)
    EbsVolumeTypeThroughputOptimizedHdd = "THROUGHPUT_OPTIMIZED_HDD"
)
const (
    // ClusterStatePending Indicates that a cluster is in the process of being created.
    ClusterStatePending = "PENDING"
    // ClusterStateRunning Indicates that a cluster has been started and is ready for use.
    ClusterStateRunning = "RUNNING"
    // ClusterStateRestarting Indicates that a cluster is in the process of restarting.
    ClusterStateRestarting = "RESTARTING"
    // ClusterStateResizing Indicates that a cluster is in the process of adding or removing nodes.
    ClusterStateResizing = "RESIZING"
    // ClusterStateTerminating Indicates that a cluster is in the process of being destroyed.
    ClusterStateTerminating = "TERMINATING"
    // ClusterStateTerminated Indicates that a cluster has been successfully destroyed.
    ClusterStateTerminated = "TERMINATED"
    // ClusterStateError This state is not used anymore. It was used to indicate a cluster
    // that failed to be created. Terminating and Terminated are used instead.
    ClusterStateError = "ERROR"
    // ClusterStateUnknown Indicates that a cluster is in an unknown state. A cluster should never be in this state.
    ClusterStateUnknown = "UNKNOWN"
)
const DefaultProvisionTimeout = 30 * time.Minute
DefaultProvisionTimeout is the default amount of time to wait for a cluster to be provisioned.
const DefaultTimeout = 20 * time.Minute
DefaultTimeout is the default amount of time that Terraform will wait when creating, updating and deleting pipelines.
Variables ¶
This section is empty.
Functions ¶
func CommonEnvironmentClientWithRealCommandExecutor ¶
func CommonEnvironmentClientWithRealCommandExecutor() *common.DatabricksClient
CommonEnvironmentClientWithRealCommandExecutor is good for internal tests
func CommonInstancePoolID ¶
func CommonInstancePoolID() string
CommonInstancePoolID returns the ID of the common instance pool intended for internal testing purposes
func DataSourceClusterZones ¶
func DataSourceClusterZones() *schema.Resource
DataSourceClusterZones returns a data source that lists the availability zones for clusters.
func DataSourceNodeType ¶
func DataSourceNodeType() *schema.Resource
DataSourceNodeType returns the smallest node type depending on the cloud
func DataSourceSparkVersion ¶
func DataSourceSparkVersion() *schema.Resource
DataSourceSparkVersion returns the DBR version matching the specification
func ResourceCluster ¶
func ResourceCluster() *schema.Resource
ResourceCluster returns the Cluster resource description
func ResourceClusterPolicy ¶
func ResourceClusterPolicy() *schema.Resource
ResourceClusterPolicy returns the cluster policy resource description
func ResourceInstancePool ¶
func ResourceInstancePool() *schema.Resource
ResourceInstancePool returns the instance pool resource description
func ResourceJob ¶
func ResourceJob() *schema.Resource
ResourceJob returns the job resource description
func ResourcePipeline ¶ added in v0.3.2
func ResourcePipeline() *schema.Resource
ResourcePipeline defines the Terraform resource for pipelines.
Types ¶
type AutoScale ¶
type AutoScale struct {
    MinWorkers int32 `json:"min_workers,omitempty"`
    MaxWorkers int32 `json:"max_workers,omitempty"`
}
AutoScale is a struct that describes auto scaling for clusters
type Availability ¶ added in v0.3.2
type Availability string
Availability is a type for describing AWS availability on cluster nodes
type AwsAttributes ¶
type AwsAttributes struct {
    FirstOnDemand int32 `json:"first_on_demand,omitempty" tf:"computed"`
    Availability Availability `json:"availability,omitempty" tf:"computed"`
    ZoneID string `json:"zone_id,omitempty" tf:"computed"`
    InstanceProfileArn string `json:"instance_profile_arn,omitempty"`
    SpotBidPricePercent int32 `json:"spot_bid_price_percent,omitempty" tf:"computed"`
    EbsVolumeType EbsVolumeType `json:"ebs_volume_type,omitempty" tf:"computed"`
    EbsVolumeCount int32 `json:"ebs_volume_count,omitempty" tf:"computed"`
    EbsVolumeSize int32 `json:"ebs_volume_size,omitempty" tf:"computed"`
}
AwsAttributes encapsulates the aws attributes for aws based clusters https://docs.databricks.com/dev-tools/api/latest/clusters.html#clusterclusterattributes
type AzureAttributes ¶ added in v0.3.2
type AzureAttributes struct {
    FirstOnDemand int32 `json:"first_on_demand,omitempty" tf:"computed"`
    Availability Availability `json:"availability,omitempty" tf:"computed"`
    SpotBidMaxPrice float64 `json:"spot_bid_max_price,omitempty" tf:"computed"`
}
AzureAttributes encapsulates the Azure attributes for Azure based clusters https://docs.microsoft.com/en-us/azure/databricks/dev-tools/api/latest/clusters#clusterazureattributes
type AzureDiskVolumeType ¶
type AzureDiskVolumeType string
AzureDiskVolumeType is the disk type on Azure VMs
type Cluster ¶
type Cluster struct {
    ClusterID string `json:"cluster_id,omitempty"`
    ClusterName string `json:"cluster_name,omitempty"`
    SparkVersion string `json:"spark_version"` // TODO: perhaps make a default
    NumWorkers int32 `json:"num_workers" tf:"group:size"`
    Autoscale *AutoScale `json:"autoscale,omitempty" tf:"group:size"`
    EnableElasticDisk bool `json:"enable_elastic_disk,omitempty" tf:"computed"`
    EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty" tf:"computed"`
    NodeTypeID string `json:"node_type_id,omitempty" tf:"group:node_type,computed"`
    DriverNodeTypeID string `json:"driver_node_type_id,omitempty" tf:"group:node_type,computed"`
    InstancePoolID string `json:"instance_pool_id,omitempty" tf:"group:node_type"`
    DriverInstancePoolID string `json:"driver_instance_pool_id,omitempty" tf:"group:node_type,computed"`
    PolicyID string `json:"policy_id,omitempty"`
    AwsAttributes *AwsAttributes `json:"aws_attributes,omitempty" tf:"conflicts:instance_pool_id,suppress_diff"`
    AzureAttributes *AzureAttributes `json:"azure_attributes,omitempty" tf:"conflicts:instance_pool_id,suppress_diff"`
    GcpAttributes *GcpAttributes `json:"gcp_attributes,omitempty" tf:"conflicts:instance_pool_id,suppress_diff"`
    AutoterminationMinutes int32 `json:"autotermination_minutes,omitempty"`
    SparkConf map[string]string `json:"spark_conf,omitempty"`
    SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"`
    CustomTags map[string]string `json:"custom_tags,omitempty"`
    SSHPublicKeys []string `json:"ssh_public_keys,omitempty" tf:"max_items:10"`
    InitScripts []InitScriptStorageInfo `json:"init_scripts,omitempty" tf:"max_items:10"` // TODO: tf:alias
    ClusterLogConf *StorageInfo `json:"cluster_log_conf,omitempty"`
    DockerImage *DockerImage `json:"docker_image,omitempty"`
    SingleUserName string `json:"single_user_name,omitempty"`
    IdempotencyToken string `json:"idempotency_token,omitempty" tf:"force_new"`
}
Cluster contains the information for submitting API calls to create or edit a cluster
type ClusterCloudProviderNodeInfo ¶
type ClusterCloudProviderNodeInfo struct {
    Status []string `json:"status,omitempty"`
    AvailableCoreQuota float32 `json:"available_core_quota,omitempty"`
    TotalCoreQuota float32 `json:"total_core_quota,omitempty"`
}
ClusterCloudProviderNodeInfo encapsulates the existing quota available from the cloud service provider.
type ClusterEvent ¶
type ClusterEvent struct {
    ClusterID string `json:"cluster_id"`
    Timestamp int64 `json:"timestamp"`
    Type ClusterEventType `json:"type"`
    Details EventDetails `json:"details"`
}
ClusterEvent - event information https://docs.databricks.com/dev-tools/api/latest/clusters.html#clustereventsclusterevent
type ClusterEventType ¶
type ClusterEventType string
ClusterEventType - constants for API
const (
    EvTypeCreating ClusterEventType = "CREATING"
    EvTypeDidNotExpandDisk ClusterEventType = "DID_NOT_EXPAND_DISK"
    EvTypeExpandedDisk ClusterEventType = "EXPANDED_DISK"
    EvTypeFailedToExpandDisk ClusterEventType = "FAILED_TO_EXPAND_DISK"
    EvTypeInitScriptsStarting ClusterEventType = "INIT_SCRIPTS_STARTING"
    EvTypeInitScriptsFinished ClusterEventType = "INIT_SCRIPTS_FINISHED"
    EvTypeStarting ClusterEventType = "STARTING"
    EvTypeRestarting ClusterEventType = "RESTARTING"
    EvTypeTerminating ClusterEventType = "TERMINATING"
    EvTypeEdited ClusterEventType = "EDITED"
    EvTypeRunning ClusterEventType = "RUNNING"
    EvTypeResizing ClusterEventType = "RESIZING"
    EvTypeUpsizeCompleted ClusterEventType = "UPSIZE_COMPLETED"
    EvTypeNodesLost ClusterEventType = "NODES_LOST"
    EvTypeDriverHealthy ClusterEventType = "DRIVER_HEALTHY"
    EvTypeSparkException ClusterEventType = "SPARK_EXCEPTION"
    EvTypeDriverNotResponding ClusterEventType = "DRIVER_NOT_RESPONDING"
    EvTypeDbfsDown ClusterEventType = "DBFS_DOWN"
    EvTypeMetastoreDown ClusterEventType = "METASTORE_DOWN"
    EvTypeNodeBlacklisted ClusterEventType = "NODE_BLACKLISTED"
    EvTypePinned ClusterEventType = "PINNED"
    EvTypeUnpinned ClusterEventType = "UNPINNED"
)
Constants for Event Types
type ClusterID ¶
type ClusterID struct {
ClusterID string `json:"cluster_id,omitempty" url:"cluster_id,omitempty"`
}
ClusterID holds cluster ID
type ClusterInfo ¶
type ClusterInfo struct {
    NumWorkers int32 `json:"num_workers,omitempty"`
    AutoScale *AutoScale `json:"autoscale,omitempty"`
    ClusterID string `json:"cluster_id,omitempty"`
    CreatorUserName string `json:"creator_user_name,omitempty"`
    Driver *SparkNode `json:"driver,omitempty"`
    Executors []SparkNode `json:"executors,omitempty"`
    SparkContextID int64 `json:"spark_context_id,omitempty"`
    JdbcPort int32 `json:"jdbc_port,omitempty"`
    ClusterName string `json:"cluster_name,omitempty"`
    SparkVersion string `json:"spark_version"`
    SparkConf map[string]string `json:"spark_conf,omitempty"`
    AwsAttributes *AwsAttributes `json:"aws_attributes,omitempty"`
    AzureAttributes *AzureAttributes `json:"azure_attributes,omitempty"`
    GcpAttributes *GcpAttributes `json:"gcp_attributes,omitempty"`
    NodeTypeID string `json:"node_type_id,omitempty"`
    DriverNodeTypeID string `json:"driver_node_type_id,omitempty"`
    SSHPublicKeys []string `json:"ssh_public_keys,omitempty"`
    CustomTags map[string]string `json:"custom_tags,omitempty"`
    ClusterLogConf *StorageInfo `json:"cluster_log_conf,omitempty"`
    InitScripts []StorageInfo `json:"init_scripts,omitempty"`
    SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"`
    AutoterminationMinutes int32 `json:"autotermination_minutes,omitempty"`
    EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"`
    EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty"`
    InstancePoolID string `json:"instance_pool_id,omitempty"`
    DriverInstancePoolID string `json:"driver_instance_pool_id,omitempty" tf:"computed"`
    PolicyID string `json:"policy_id,omitempty"`
    SingleUserName string `json:"single_user_name,omitempty"`
    ClusterSource Availability `json:"cluster_source,omitempty"`
    DockerImage *DockerImage `json:"docker_image,omitempty"`
    State ClusterState `json:"state"`
    StateMessage string `json:"state_message,omitempty"`
    StartTime int64 `json:"start_time,omitempty"`
    TerminateTime int64 `json:"terminate_time,omitempty"`
    LastStateLossTime int64 `json:"last_state_loss_time,omitempty"`
    LastActivityTime int64 `json:"last_activity_time,omitempty"`
    ClusterMemoryMb int64 `json:"cluster_memory_mb,omitempty"`
    ClusterCores float32 `json:"cluster_cores,omitempty"`
    DefaultTags map[string]string `json:"default_tags"`
    ClusterLogStatus *LogSyncStatus `json:"cluster_log_status,omitempty"`
    TerminationReason *TerminationReason `json:"termination_reason,omitempty"`
}
ClusterInfo contains the information when getting cluster info from the get request.
func NewTinyClusterInCommonPool ¶
func NewTinyClusterInCommonPool() (c ClusterInfo, err error)
NewTinyClusterInCommonPool creates a new cluster for short-lived purposes
func NewTinyClusterInCommonPoolPossiblyReused ¶
func NewTinyClusterInCommonPoolPossiblyReused() (c ClusterInfo)
NewTinyClusterInCommonPoolPossiblyReused is recommended to be used for testing only
func (*ClusterInfo) IsRunningOrResizing ¶
func (ci *ClusterInfo) IsRunningOrResizing() bool
IsRunningOrResizing returns true if cluster is running or resizing
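A minimal sketch of how this helper fits into a polling loop, assuming the snippet lives in this package, imports context, log, and time, and that the client returned by CommonEnvironmentClientWithRealCommandExecutor is properly configured; the cluster ID is hypothetical:
func ExampleClusterInfo_IsRunningOrResizing() {
    client := CommonEnvironmentClientWithRealCommandExecutor()
    clustersAPI := NewClustersAPI(context.Background(), client)
    for {
        // "abc-123" is a hypothetical cluster ID used for illustration only.
        info, err := clustersAPI.Get("abc-123")
        if err != nil {
            log.Fatal(err)
        }
        if info.IsRunningOrResizing() {
            break // the cluster is usable
        }
        time.Sleep(10 * time.Second) // naive fixed-interval polling
    }
}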
type ClusterLibraryList ¶
type ClusterLibraryList struct {
    ClusterID string `json:"cluster_id,omitempty" url:"cluster_id,omitempty"`
    Libraries []Library `json:"libraries,omitempty" url:"libraries,omitempty" tf:"slice_set,alias:library"`
}
ClusterLibraryList is the request body for install and uninstall
func (*ClusterLibraryList) Diff ¶
func (cll *ClusterLibraryList) Diff(cls ClusterLibraryStatuses) (ClusterLibraryList, ClusterLibraryList)
Diff returns install/uninstall lists given a cluster lib status
type ClusterLibraryStatuses ¶
type ClusterLibraryStatuses struct {
    ClusterID string `json:"cluster_id,omitempty"`
    LibraryStatuses []LibraryStatus `json:"library_statuses,omitempty"`
}
ClusterLibraryStatuses: a status will be available for all libraries installed on the cluster via the API or the libraries UI, as well as libraries set to be installed on all clusters via the libraries UI. If a library has been set to be installed on all clusters, is_library_for_all_clusters will be true, even if the library was also installed on the cluster.
func (ClusterLibraryStatuses) IsRetryNeeded ¶
func (cls ClusterLibraryStatuses) IsRetryNeeded() (bool, error)
IsRetryNeeded returns true in its first result if a retry is needed; in that case the error explains why. If no retry is needed and the error is non-nil, the operation failed.
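A sketch of the polling loop this method is designed for, wrapped in a hypothetical helper; it assumes libsAPI was created with NewLibrariesAPI and clusterID names an existing cluster (imports: time):
func waitForLibraries(libsAPI LibrariesAPI, clusterID string) error {
    for {
        cls, err := libsAPI.ClusterStatus(clusterID)
        if err != nil {
            return err
        }
        retry, err := cls.IsRetryNeeded()
        if !retry {
            return err // nil once all libraries settle; non-nil if installation failed
        }
        // here err explains why another poll is needed, e.g. a library still installing
        time.Sleep(15 * time.Second)
    }
}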
func (ClusterLibraryStatuses) ToLibraryList ¶
func (cls ClusterLibraryStatuses) ToLibraryList() ClusterLibraryList
ToLibraryList converts to an entity for convenient comparison
type ClusterList ¶
type ClusterList struct {
Clusters []ClusterInfo `json:"clusters,omitempty"`
}
ClusterList shows existing clusters
type ClusterPoliciesAPI ¶
type ClusterPoliciesAPI struct {
// contains filtered or unexported fields
}
ClusterPoliciesAPI struct for cluster policies API
func NewClusterPoliciesAPI ¶
func NewClusterPoliciesAPI(ctx context.Context, m interface{}) ClusterPoliciesAPI
NewClusterPoliciesAPI creates ClusterPoliciesAPI instance from provider meta. Creation and editing are available to admins only.
func (ClusterPoliciesAPI) Create ¶
func (a ClusterPoliciesAPI) Create(clusterPolicy *ClusterPolicy) error
Create creates a new cluster policy and sets PolicyID
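Since Create takes a pointer and fills in PolicyID, a typical flow creates the policy and reads the ID back. A sketch, assuming a configured client (imports: context, log); the policy name and definition JSON are illustrative only:
func ExampleClusterPoliciesAPI_Create() {
    client := CommonEnvironmentClientWithRealCommandExecutor()
    policiesAPI := NewClusterPoliciesAPI(context.Background(), client)
    policy := ClusterPolicy{
        Name:       "single-node-only", // hypothetical policy name
        Definition: `{"spark_conf.spark.databricks.cluster.profile": {"type": "fixed", "value": "singleNode"}}`,
    }
    if err := policiesAPI.Create(&policy); err != nil {
        log.Fatal(err)
    }
    log.Printf("created policy %s", policy.PolicyID) // PolicyID is set by Create
}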
func (ClusterPoliciesAPI) Delete ¶
func (a ClusterPoliciesAPI) Delete(policyID string) error
Delete removes cluster policy
func (ClusterPoliciesAPI) Edit ¶
func (a ClusterPoliciesAPI) Edit(clusterPolicy *ClusterPolicy) error
Edit will update an existing policy. This may make some clusters governed by this policy invalid. For such clusters the next cluster edit must provide a confirming configuration, but otherwise they can continue to run.
func (ClusterPoliciesAPI) Get ¶
func (a ClusterPoliciesAPI) Get(policyID string) (policy ClusterPolicy, err error)
Get returns a cluster policy
type ClusterPolicy ¶
type ClusterPolicy struct {
    PolicyID string `json:"policy_id,omitempty"`
    Name string `json:"name"`
    Definition string `json:"definition"`
    CreatedAtTimeStamp int64 `json:"created_at_timestamp"`
}
ClusterPolicy defines cluster policy
type ClusterPolicyCreate ¶
ClusterPolicyCreate is the entity used for requests
type ClusterSize ¶
type ClusterSize struct {
    NumWorkers int32 `json:"num_workers"`
    AutoScale *AutoScale `json:"autoscale"`
}
ClusterSize is a structure that tracks the cluster size, see https://docs.databricks.com/dev-tools/api/latest/clusters.html#clusterclustersize
type ClusterState ¶
type ClusterState string
ClusterState is for describing possible cluster states
func (ClusterState) CanReach ¶
func (state ClusterState) CanReach(desired ClusterState) bool
CanReach returns true if cluster state can reach desired state
type ClustersAPI ¶
type ClustersAPI struct {
// contains filtered or unexported fields
}
ClustersAPI is a struct that contains the Databricks api client to perform queries
func NewClustersAPI ¶
func NewClustersAPI(ctx context.Context, m interface{}) ClustersAPI
NewClustersAPI creates ClustersAPI instance from provider meta
func (ClustersAPI) Create ¶
func (a ClustersAPI) Create(cluster Cluster) (info ClusterInfo, err error)
Create creates a new Spark cluster and waits until it's running
func (ClustersAPI) Edit ¶
func (a ClustersAPI) Edit(cluster Cluster) (info ClusterInfo, err error)
Edit edits the configuration of a cluster to match the provided attributes and size
func (ClustersAPI) Events ¶
func (a ClustersAPI) Events(eventsRequest EventsRequest) ([]ClusterEvent, error)
Events retrieves all events for a cluster; only the cluster ID string in the request is required https://docs.databricks.com/dev-tools/api/latest/clusters.html#events
func (ClustersAPI) Get ¶
func (a ClustersAPI) Get(clusterID string) (ci ClusterInfo, err error)
Get retrieves the information for a cluster given its identifier
func (ClustersAPI) GetOrCreateRunningCluster ¶
func (a ClustersAPI) GetOrCreateRunningCluster(name string, custom ...Cluster) (c ClusterInfo, err error)
GetOrCreateRunningCluster creates an autoterminating cluster if it doesn't exist
func (ClustersAPI) GetSmallestNodeType ¶
func (a ClustersAPI) GetSmallestNodeType(r NodeTypeRequest) string
GetSmallestNodeType returns smallest (or default) node type id given the criteria
func (ClustersAPI) LatestSparkVersion ¶
func (a ClustersAPI) LatestSparkVersion(svr SparkVersionRequest) (string, error)
LatestSparkVersion returns latest version matching the request parameters
func (ClustersAPI) LatestSparkVersionOrDefault ¶
func (a ClustersAPI) LatestSparkVersionOrDefault(svr SparkVersionRequest) string
LatestSparkVersionOrDefault returns Spark version matching the definition, or default in case of error
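The selector helpers compose when provisioning a cluster: GetSmallestNodeType and LatestSparkVersionOrDefault fill in the required fields, and Create blocks until the cluster is running. A sketch under the same configured-client assumption as above (imports: context, log); name and sizes are illustrative:
func ExampleClustersAPI_Create() {
    client := CommonEnvironmentClientWithRealCommandExecutor()
    clustersAPI := NewClustersAPI(context.Background(), client)
    cluster := Cluster{
        ClusterName:            "docs-example", // hypothetical name
        NodeTypeID:             clustersAPI.GetSmallestNodeType(NodeTypeRequest{LocalDisk: true}),
        SparkVersion:           clustersAPI.LatestSparkVersionOrDefault(SparkVersionRequest{Latest: true, LongTermSupport: true}),
        NumWorkers:             1,
        AutoterminationMinutes: 30,
    }
    info, err := clustersAPI.Create(cluster) // blocks until the cluster is RUNNING
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("cluster %s is %s", info.ClusterID, info.State)
}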
func (ClustersAPI) List ¶
func (a ClustersAPI) List() ([]ClusterInfo, error)
List returns information about all pinned clusters, currently active clusters, up to 70 of the most recently terminated interactive clusters in the past 30 days, and up to 30 of the most recently terminated job clusters in the past 30 days
func (ClustersAPI) ListNodeTypes ¶
func (a ClustersAPI) ListNodeTypes() (l NodeTypeList, err error)
ListNodeTypes returns a sorted list of supported Spark node types
func (ClustersAPI) ListSparkVersions ¶
func (a ClustersAPI) ListSparkVersions() (SparkVersionsList, error)
ListSparkVersions returns a list of all currently supported Spark versions
func (ClustersAPI) ListZones ¶
func (a ClustersAPI) ListZones() (ZonesInfo, error)
ListZones returns the zones info sent by the cloud service provider
func (ClustersAPI) PermanentDelete ¶
func (a ClustersAPI) PermanentDelete(clusterID string) error
PermanentDelete permanently deletes a cluster
func (ClustersAPI) Pin ¶
func (a ClustersAPI) Pin(clusterID string) error
Pin ensures that an interactive cluster configuration is retained even after a cluster has been terminated for more than 30 days
func (ClustersAPI) Restart ¶
func (a ClustersAPI) Restart(clusterID string) error
Restart restarts a Spark cluster given its ID. If the cluster is not in a RUNNING state, nothing will happen.
func (ClustersAPI) Start ¶
func (a ClustersAPI) Start(clusterID string) error
Start starts a terminated Spark cluster given its ID and waits until it's running
func (ClustersAPI) StartAndGetInfo ¶
func (a ClustersAPI) StartAndGetInfo(clusterID string) (ClusterInfo, error)
StartAndGetInfo starts cluster and returns info
func (ClustersAPI) Terminate ¶
func (a ClustersAPI) Terminate(clusterID string) error
Terminate terminates a Spark cluster given its ID
func (ClustersAPI) Unpin ¶
func (a ClustersAPI) Unpin(clusterID string) error
Unpin allows the cluster to eventually be removed from the list returned by the List API
type Command ¶
type Command struct {
    ID string `json:"id,omitempty"`
    Status string `json:"status,omitempty"`
    Results *common.CommandResults `json:"results,omitempty"`
}
Command is the struct that contains what the 1.2 API returns for the commands API
type CommandsAPI ¶
type CommandsAPI struct {
// contains filtered or unexported fields
}
CommandsAPI exposes the Context & Commands API
func NewCommandsAPI ¶
func NewCommandsAPI(ctx context.Context, m interface{}) CommandsAPI
NewCommandsAPI creates CommandsAPI instance from provider meta
func (CommandsAPI) Execute ¶
func (a CommandsAPI) Execute(clusterID, language, commandStr string) common.CommandResults
Execute creates a Spark context, executes a command, and then closes the context. Any leading whitespace is trimmed.
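A sketch of executing a command on a running cluster, assuming a configured client (imports: context); how to read output or failure from common.CommandResults lives in the common package and is not documented on this page:
func ExampleCommandsAPI_Execute() {
    client := CommonEnvironmentClientWithRealCommandExecutor()
    commandsAPI := NewCommandsAPI(context.Background(), client)
    // "abc-123" is a hypothetical ID of a RUNNING cluster.
    result := commandsAPI.Execute("abc-123", "python", `print("hello")`)
    // result is a common.CommandResults; consult the common package for how to
    // read the command output or detect failure.
    _ = result
}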
type CronSchedule ¶
type CronSchedule struct {
    QuartzCronExpression string `json:"quartz_cron_expression"`
    TimezoneID string `json:"timezone_id"`
    PauseStatus string `json:"pause_status,omitempty" tf:"computed"`
}
CronSchedule contains the information for the quartz cron expression
type DbfsStorageInfo ¶
type DbfsStorageInfo struct {
Destination string `json:"destination"`
}
DbfsStorageInfo contains the destination string for DBFS
type DockerBasicAuth ¶
type DockerBasicAuth struct {
    Username string `json:"username" tf:"force_new"`
    Password string `json:"password" tf:"force_new"`
}
DockerBasicAuth contains the auth information when fetching containers
type DockerImage ¶
type DockerImage struct {
    URL string `json:"url" tf:"force_new"`
    BasicAuth *DockerBasicAuth `json:"basic_auth,omitempty" tf:"force_new"`
}
DockerImage contains the image URL and the auth for DCS (Databricks Container Services)
type EmailNotifications ¶ added in v0.3.9
type EmailNotifications struct {
    OnStart []string `json:"on_start,omitempty"`
    OnSuccess []string `json:"on_success,omitempty"`
    OnFailure []string `json:"on_failure,omitempty"`
    NoAlertForSkippedRuns bool `json:"no_alert_for_skipped_runs,omitempty"`
}
EmailNotifications contains the information for email notifications after job completion
type EventDetails ¶
type EventDetails struct {
    CurrentNumWorkers int32 `json:"current_num_workers,omitempty"`
    TargetNumWorkers int32 `json:"target_num_workers,omitempty"`
    PreviousAttributes *AwsAttributes `json:"previous_attributes,omitempty"`
    Attributes *AwsAttributes `json:"attributes,omitempty"`
    PreviousClusterSize *ClusterSize `json:"previous_cluster_size,omitempty"`
    ClusterSize *ClusterSize `json:"cluster_size,omitempty"`
    ResizeCause *ResizeCause `json:"cause,omitempty"`
    Reason *TerminationReason `json:"reason,omitempty"`
    User string `json:"user"`
}
EventDetails - details about specific events https://docs.databricks.com/dev-tools/api/latest/clusters.html#clustereventseventdetails
type EventsRequest ¶
type EventsRequest struct {
    ClusterID string `json:"cluster_id"`
    StartTime int64 `json:"start_time,omitempty"`
    EndTime int64 `json:"end_time,omitempty"`
    Order SortOrder `json:"order,omitempty"`
    EventTypes []ClusterEventType `json:"event_types,omitempty"`
    Offset int64 `json:"offset,omitempty"`
    Limit int64 `json:"limit,omitempty"`
    MaxItems uint `json:"-"`
}
EventsRequest - request structure https://docs.databricks.com/dev-tools/api/latest/clusters.html#request-structure
type EventsResponse ¶
type EventsResponse struct {
    Events []ClusterEvent `json:"events"`
    NextPage *EventsRequest `json:"next_page"`
    TotalCount int64 `json:"total_count"`
}
EventsResponse - answer from API https://docs.databricks.com/dev-tools/api/latest/clusters.html#response-structure
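Events pages through EventsResponse via NextPage internally, so callers usually just bound the total with MaxItems, which is client-side only (json:"-"). A sketch, assuming a configured client and a hypothetical cluster ID; the "DESC" sort value is taken from the linked API docs rather than a constant shown on this page:
func ExampleClustersAPI_Events() {
    client := CommonEnvironmentClientWithRealCommandExecutor()
    clustersAPI := NewClustersAPI(context.Background(), client)
    events, err := clustersAPI.Events(EventsRequest{
        ClusterID:  "abc-123",         // hypothetical cluster ID
        Order:      SortOrder("DESC"), // the API accepts DESC or ASC
        EventTypes: []ClusterEventType{EvTypeStarting, EvTypeTerminating},
        MaxItems:   25, // client-side cap; not serialized to the API
    })
    if err != nil {
        log.Fatal(err)
    }
    for _, e := range events {
        log.Printf("%d %s", e.Timestamp, e.Type)
    }
}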
type GcpAttributes ¶ added in v0.3.2
type GcpAttributes struct {
    UsePreemptibleExecutors bool `json:"use_preemptible_executors,omitempty" tf:"computed"`
    GoogleServiceAccount string `json:"google_service_account,omitempty" tf:"computed"`
}
GcpAttributes encapsulates GCP specific attributes https://docs.gcp.databricks.com/dev-tools/api/latest/clusters.html#clustergcpattributes
type InitScriptStorageInfo ¶ added in v0.3.2
type InitScriptStorageInfo struct {
    Dbfs *DbfsStorageInfo `json:"dbfs,omitempty" tf:"group:storage"`
    S3 *S3StorageInfo `json:"s3,omitempty" tf:"group:storage"`
    File *LocalFileInfo `json:"file,omitempty" tf:"optional"`
}
InitScriptStorageInfo captures the allowed sources of init scripts.
type InstancePool ¶
type InstancePool struct {
    InstancePoolID string `json:"instance_pool_id,omitempty" tf:"computed"`
    InstancePoolName string `json:"instance_pool_name"`
    MinIdleInstances int32 `json:"min_idle_instances,omitempty"`
    MaxCapacity int32 `json:"max_capacity,omitempty"`
    IdleInstanceAutoTerminationMinutes int32 `json:"idle_instance_autotermination_minutes"`
    AwsAttributes *InstancePoolAwsAttributes `json:"aws_attributes,omitempty" tf:"force_new,suppress_diff"`
    AzureAttributes *InstancePoolAzureAttributes `json:"azure_attributes,omitempty" tf:"force_new,suppress_diff"`
    NodeTypeID string `json:"node_type_id" tf:"force_new"`
    CustomTags map[string]string `json:"custom_tags,omitempty" tf:"force_new"`
    EnableElasticDisk bool `json:"enable_elastic_disk,omitempty" tf:"force_new"`
    DiskSpec *InstancePoolDiskSpec `json:"disk_spec,omitempty" tf:"force_new"`
    PreloadedSparkVersions []string `json:"preloaded_spark_versions,omitempty" tf:"force_new"`
    PreloadedDockerImages []DockerImage `json:"preloaded_docker_images,omitempty" tf:"force_new,slice_set,alias:preloaded_docker_image"`
}
InstancePool describes the instance pool object on Databricks
type InstancePoolAndStats ¶
type InstancePoolAndStats struct {
    InstancePoolID string `json:"instance_pool_id,omitempty" tf:"computed"`
    InstancePoolName string `json:"instance_pool_name"`
    MinIdleInstances int32 `json:"min_idle_instances,omitempty"`
    MaxCapacity int32 `json:"max_capacity,omitempty"`
    AwsAttributes *InstancePoolAwsAttributes `json:"aws_attributes,omitempty"`
    AzureAttributes *InstancePoolAzureAttributes `json:"azure_attributes,omitempty"`
    NodeTypeID string `json:"node_type_id"`
    DefaultTags map[string]string `json:"default_tags,omitempty" tf:"computed"`
    CustomTags map[string]string `json:"custom_tags,omitempty"`
    IdleInstanceAutoTerminationMinutes int32 `json:"idle_instance_autotermination_minutes"`
    EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"`
    DiskSpec *InstancePoolDiskSpec `json:"disk_spec,omitempty"`
    PreloadedSparkVersions []string `json:"preloaded_spark_versions,omitempty"`
    State string `json:"state,omitempty"`
    Stats *InstancePoolStats `json:"stats,omitempty"`
    PreloadedDockerImages []DockerImage `json:"preloaded_docker_images,omitempty" tf:"slice_set,alias:preloaded_docker_image"`
}
InstancePoolAndStats encapsulates a get response from the GET api for instance pools on Databricks
type InstancePoolAwsAttributes ¶
type InstancePoolAwsAttributes struct {
    Availability Availability `json:"availability,omitempty" tf:"force_new"`
    ZoneID string `json:"zone_id,omitempty" tf:"computed,force_new"`
    SpotBidPricePercent int32 `json:"spot_bid_price_percent,omitempty" tf:"force_new"`
}
InstancePoolAwsAttributes contains aws attributes for AWS Databricks deployments for instance pools
type InstancePoolAzureAttributes ¶ added in v0.3.2
type InstancePoolAzureAttributes struct {
    Availability Availability `json:"availability,omitempty" tf:"force_new"`
    SpotBidMaxPrice float64 `json:"spot_bid_max_price,omitempty" tf:"force_new"`
}
InstancePoolAzureAttributes contains Azure attributes for Azure Databricks deployments for instance pools https://docs.microsoft.com/en-us/azure/databricks/dev-tools/api/latest/instance-pools#clusterinstancepoolazureattributes
type InstancePoolDiskSpec ¶
type InstancePoolDiskSpec struct {
    DiskType *InstancePoolDiskType `json:"disk_type,omitempty"`
    DiskCount int32 `json:"disk_count,omitempty"`
    DiskSize int32 `json:"disk_size,omitempty"`
}
InstancePoolDiskSpec contains disk size, type and count information for the pool
type InstancePoolDiskType ¶
type InstancePoolDiskType struct {
    AzureDiskVolumeType string `json:"azure_disk_volume_type,omitempty" tf:"force_new"`
    EbsVolumeType string `json:"ebs_volume_type,omitempty" tf:"force_new"`
}
InstancePoolDiskType contains disk type information for each of the different cloud service providers
type InstancePoolList ¶
type InstancePoolList struct {
InstancePools []InstancePoolAndStats `json:"instance_pools"`
}
InstancePoolList shows list of instance pools
type InstancePoolStats ¶
type InstancePoolStats struct {
    UsedCount int32 `json:"used_count,omitempty"`
    IdleCount int32 `json:"idle_count,omitempty"`
    PendingUsedCount int32 `json:"pending_used_count,omitempty"`
    PendingIdleCount int32 `json:"pending_idle_count,omitempty"`
}
InstancePoolStats contains the stats on a given pool
type InstancePoolsAPI ¶
type InstancePoolsAPI struct {
// contains filtered or unexported fields
}
InstancePoolsAPI exposes the instance pools api
func NewInstancePoolsAPI ¶
func NewInstancePoolsAPI(ctx context.Context, m interface{}) InstancePoolsAPI
NewInstancePoolsAPI creates InstancePoolsAPI instance from provider meta
func (InstancePoolsAPI) Create ¶
func (a InstancePoolsAPI) Create(instancePool InstancePool) (InstancePoolAndStats, error)
Create creates an instance pool given the instance pool configuration
func (InstancePoolsAPI) Delete ¶
func (a InstancePoolsAPI) Delete(instancePoolID string) error
Delete terminates an instance pool given its ID
func (InstancePoolsAPI) List ¶
func (a InstancePoolsAPI) List() (ipl InstancePoolList, err error)
List retrieves the list of existing instance pools
func (InstancePoolsAPI) Read ¶
func (a InstancePoolsAPI) Read(instancePoolID string) (ip InstancePool, err error)
Read retrieves the information for an instance pool given its identifier
func (InstancePoolsAPI) Update ¶
func (a InstancePoolsAPI) Update(ip InstancePool) error
Update edits the configuration of an instance pool to match the provided attributes and size
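A sketch of the create/read round trip for pools, assuming a configured client (imports: context, log); the node type ID is cloud-specific and hypothetical here:
func ExampleInstancePoolsAPI_Create() {
    client := CommonEnvironmentClientWithRealCommandExecutor()
    poolsAPI := NewInstancePoolsAPI(context.Background(), client)
    created, err := poolsAPI.Create(InstancePool{
        InstancePoolName:                   "docs-example",
        NodeTypeID:                         "i3.xlarge", // hypothetical AWS node type
        MinIdleInstances:                   1,
        MaxCapacity:                        10,
        IdleInstanceAutoTerminationMinutes: 15,
    })
    if err != nil {
        log.Fatal(err)
    }
    pool, err := poolsAPI.Read(created.InstancePoolID)
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("pool %s (%s) is ready", pool.InstancePoolID, pool.InstancePoolName)
}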
type Job ¶
type Job struct {
    JobID int64 `json:"job_id,omitempty"`
    CreatorUserName string `json:"creator_user_name,omitempty"`
    Settings *JobSettings `json:"settings,omitempty"`
    CreatedTime int64 `json:"created_time,omitempty"`
}
Job contains the information when using a GET request from the Databricks Jobs api
type JobRun ¶
type JobRun struct {
    JobID int64 `json:"job_id"`
    RunID int64 `json:"run_id"`
    NumberInJob int64 `json:"number_in_job"`
    StartTime int64 `json:"start_time,omitempty"`
    State RunState `json:"state"`
    Trigger string `json:"trigger,omitempty"`
    RuntType string `json:"run_type,omitempty"`
    OverridingParameters RunParameters `json:"overriding_parameters,omitempty"`
}
JobRun is a simplified representation of the corresponding entity
type JobRunsList ¶
JobRunsList is the list of job runs returned by RunsList.
type JobRunsListRequest ¶
type JobRunsListRequest struct {
    JobID int64 `url:"job_id,omitempty"`
    ActiveOnly bool `url:"active_only,omitempty"`
    CompletedOnly bool `url:"completed_only,omitempty"`
    Offset int32 `url:"offset,omitempty"`
    Limit int32 `url:"limit,omitempty"`
}
JobRunsListRequest is the request for listing runs of a job.
type JobSettings ¶
type JobSettings struct {
    Name string `json:"name,omitempty" tf:"default:Untitled"`
    // BEGIN Jobs API 2.0
    ExistingClusterID string `json:"existing_cluster_id,omitempty" tf:"group:cluster_type"`
    NewCluster *Cluster `json:"new_cluster,omitempty" tf:"group:cluster_type"`
    NotebookTask *NotebookTask `json:"notebook_task,omitempty" tf:"group:task_type"`
    SparkJarTask *SparkJarTask `json:"spark_jar_task,omitempty" tf:"group:task_type"`
    SparkPythonTask *SparkPythonTask `json:"spark_python_task,omitempty" tf:"group:task_type"`
    SparkSubmitTask *SparkSubmitTask `json:"spark_submit_task,omitempty" tf:"group:task_type"`
    PipelineTask *PipelineTask `json:"pipeline_task,omitempty" tf:"group:task_type"`
    PythonWheelTask *PythonWheelTask `json:"python_wheel_task,omitempty" tf:"group:task_type"`
    Libraries []Library `json:"libraries,omitempty" tf:"slice_set,alias:library"`
    TimeoutSeconds int32 `json:"timeout_seconds,omitempty"`
    MaxRetries int32 `json:"max_retries,omitempty"`
    MinRetryIntervalMillis int32 `json:"min_retry_interval_millis,omitempty"`
    RetryOnTimeout bool `json:"retry_on_timeout,omitempty"`
    // BEGIN Jobs API 2.1
    Tasks []JobTaskSettings `json:"tasks,omitempty" tf:"alias:task"`
    Format string `json:"format,omitempty" tf:"computed"`
    Schedule *CronSchedule `json:"schedule,omitempty"`
    MaxConcurrentRuns int32 `json:"max_concurrent_runs,omitempty"`
    EmailNotifications *EmailNotifications `json:"email_notifications,omitempty" tf:"suppress_diff"`
}
JobSettings contains the information for configuring a job on Databricks
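A sketch of a minimal Jobs API 2.0 style configuration with a single notebook task, assuming a configured client (imports: context, log); the notebook path is hypothetical:
func ExampleJobsAPI_Create() {
    client := CommonEnvironmentClientWithRealCommandExecutor()
    ctx := context.Background()
    clustersAPI := NewClustersAPI(ctx, client)
    jobsAPI := NewJobsAPI(ctx, client)
    job, err := jobsAPI.Create(JobSettings{
        Name: "docs-example",
        NewCluster: &Cluster{
            SparkVersion: clustersAPI.LatestSparkVersionOrDefault(SparkVersionRequest{Latest: true}),
            NodeTypeID:   clustersAPI.GetSmallestNodeType(NodeTypeRequest{LocalDisk: true}),
            NumWorkers:   1,
        },
        NotebookTask: &NotebookTask{
            NotebookPath: "/Shared/example", // hypothetical notebook path
        },
        MaxConcurrentRuns: 1,
    })
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("created job %d", job.JobID)
}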
type JobTaskSettings ¶ added in v0.3.9
type JobTaskSettings struct {
    TaskKey string `json:"task_key,omitempty"`
    Description string `json:"description,omitempty"`
    DependsOn []TaskDependency `json:"depends_on,omitempty"`
    ExistingClusterID string `json:"existing_cluster_id,omitempty" tf:"group:cluster_type"`
    NewCluster *Cluster `json:"new_cluster,omitempty" tf:"group:cluster_type"`
    Libraries []Library `json:"libraries,omitempty" tf:"slice_set,alias:library"`
    NotebookTask *NotebookTask `json:"notebook_task,omitempty" tf:"group:task_type"`
    SparkJarTask *SparkJarTask `json:"spark_jar_task,omitempty" tf:"group:task_type"`
    SparkPythonTask *SparkPythonTask `json:"spark_python_task,omitempty" tf:"group:task_type"`
    SparkSubmitTask *SparkSubmitTask `json:"spark_submit_task,omitempty" tf:"group:task_type"`
    PipelineTask *PipelineTask `json:"pipeline_task,omitempty" tf:"group:task_type"`
    PythonWheelTask *PythonWheelTask `json:"python_wheel_task,omitempty" tf:"group:task_type"`
    EmailNotifications *EmailNotifications `json:"email_notifications,omitempty" tf:"suppress_diff"`
    TimeoutSeconds int32 `json:"timeout_seconds,omitempty"`
    MaxRetries int32 `json:"max_retries,omitempty"`
    MinRetryIntervalMillis int32 `json:"min_retry_interval_millis,omitempty"`
    RetryOnTimeout bool `json:"retry_on_timeout,omitempty" tf:"computed"`
}
type JobsAPI ¶
type JobsAPI struct {
// contains filtered or unexported fields
}
JobsAPI exposes the Jobs API
func NewJobsAPI ¶
func NewJobsAPI(ctx context.Context, m interface{}) JobsAPI
NewJobsAPI creates JobsAPI instance from provider meta
func (JobsAPI) Create ¶
func (a JobsAPI) Create(jobSettings JobSettings) (Job, error)
Create creates a job on the workspace given the job settings
func (JobsAPI) RunsCancel ¶ added in v0.3.6
func (a JobsAPI) RunsCancel(runID int64, timeout time.Duration) error
RunsCancel cancels a job run, waiting up to the given timeout.
func (JobsAPI) RunsList ¶
func (a JobsAPI) RunsList(r JobRunsListRequest) (jrl JobRunsList, err error)
RunsList lists job runs matching the request.
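A sketch of triggering a run and checking its state, assuming a configured client and a hypothetical job ID (imports: context, log); the fields of JobRunsList are not shown on this page, so the listing result is left unexamined:
func ExampleJobsAPI_RunNow() {
    client := CommonEnvironmentClientWithRealCommandExecutor()
    jobsAPI := NewJobsAPI(context.Background(), client)
    // 123 is a hypothetical job ID, e.g. from a previous Create call.
    runID, err := jobsAPI.RunNow(123)
    if err != nil {
        log.Fatal(err)
    }
    run, err := jobsAPI.RunsGet(runID)
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("run %d: %s", run.RunID, run.State.LifeCycleState)
    // List only the currently active runs of the same job.
    active, err := jobsAPI.RunsList(JobRunsListRequest{JobID: 123, ActiveOnly: true, Limit: 25})
    if err != nil {
        log.Fatal(err)
    }
    _ = active // inspect JobRunsList for the runs themselves
}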
type LibrariesAPI ¶
type LibrariesAPI struct {
// contains filtered or unexported fields
}
LibrariesAPI exposes the Library API
func NewLibrariesAPI ¶
func NewLibrariesAPI(ctx context.Context, m interface{}) LibrariesAPI
NewLibrariesAPI creates LibrariesAPI instance from provider meta
func (LibrariesAPI) ClusterStatus ¶
func (a LibrariesAPI) ClusterStatus(clusterID string) (cls ClusterLibraryStatuses, err error)
ClusterStatus returns the status of libraries on a cluster
func (LibrariesAPI) Install ¶
func (a LibrariesAPI) Install(req ClusterLibraryList) error
Install installs a list of libraries on a cluster
func (LibrariesAPI) Uninstall ¶
func (a LibrariesAPI) Uninstall(req ClusterLibraryList) error
Uninstall removes a list of libraries from a cluster
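Diff pairs naturally with Install and Uninstall to reconcile a desired set of libraries against what a cluster reports. A sketch wrapped in a hypothetical helper; the PyPi Package field and the version pin are assumptions, since PyPi's definition is not shown on this page:
func reconcileLibraries(libsAPI LibrariesAPI, clusterID string) error {
    desired := ClusterLibraryList{
        ClusterID: clusterID,
        Libraries: []Library{
            // the Package field on PyPi is assumed here; the pin is illustrative
            {Pypi: &PyPi{Package: "requests==2.28.1"}},
        },
    }
    current, err := libsAPI.ClusterStatus(clusterID)
    if err != nil {
        return err
    }
    // per the Diff doc comment, the install list is assumed to come first
    toInstall, toUninstall := desired.Diff(current)
    if err := libsAPI.Uninstall(toUninstall); err != nil {
        return err
    }
    return libsAPI.Install(toInstall)
}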
type Library ¶
type Library struct {
    Jar string `json:"jar,omitempty" tf:"group:lib"`
    Egg string `json:"egg,omitempty" tf:"group:lib"`
    // TODO: add name validation for wheel libraries.
    Whl string `json:"whl,omitempty" tf:"group:lib"`
    Pypi *PyPi `json:"pypi,omitempty" tf:"group:lib"`
    Maven *Maven `json:"maven,omitempty" tf:"group:lib"`
    Cran *Cran `json:"cran,omitempty" tf:"group:lib"`
}
Library is a construct that contains information about the location of the library and how to download it
func (Library) TypeAndKey ¶
TypeAndKey can be used for computing differences
type LibraryStatus ¶
type LibraryStatus struct {
    Library *Library `json:"library,omitempty"`
    Status string `json:"status,omitempty"`
    IsLibraryInstalledOnAllClusters bool `json:"is_library_for_all_clusters,omitempty"`
    Messages []string `json:"messages,omitempty"`
}
LibraryStatus is the status on a given cluster when using the libraries status api
type LocalFileInfo ¶ added in v0.3.2
type LocalFileInfo struct {
Destination string `json:"destination,omitempty" tf:"optional"`
}
LocalFileInfo represents a local file on disk, e.g. in a customer's container.
type LogSyncStatus ¶
type LogSyncStatus struct {
    LastAttempted int64 `json:"last_attempted,omitempty"`
    LastException string `json:"last_exception,omitempty"`
}
LogSyncStatus encapsulates when the cluster logs were last delivered.
type Maven ¶
type Maven struct {
    Coordinates string `json:"coordinates"`
    Repo string `json:"repo,omitempty"`
    Exclusions []string `json:"exclusions,omitempty"`
}
Maven is a jar library hosted on Maven
type NodeInstanceType ¶
type NodeInstanceType struct {
    InstanceTypeID string `json:"instance_type_id,omitempty"`
    LocalDisks int32 `json:"local_disks,omitempty"`
    LocalDiskSizeGB int32 `json:"local_disk_size_gb,omitempty"`
    LocalNVMeDisks int32 `json:"local_nvme_disks,omitempty"`
    LocalNVMeDiskSizeGB int32 `json:"local_nvme_disk_size_gb,omitempty"`
}
NodeInstanceType encapsulates information about a specific node type
type NodeType ¶
type NodeType struct {
    NodeTypeID string `json:"node_type_id,omitempty"`
    MemoryMB int32 `json:"memory_mb,omitempty"`
    NumCores float32 `json:"num_cores,omitempty"`
    NumGPUs int32 `json:"num_gpus,omitempty"`
    SupportEBSVolumes bool `json:"support_ebs_volumes,omitempty"`
    IsIOCacheEnabled bool `json:"is_io_cache_enabled,omitempty"`
    SupportPortForwarding bool `json:"support_port_forwarding,omitempty"`
    Description string `json:"description,omitempty"`
    Category string `json:"category,omitempty"`
    InstanceTypeID string `json:"instance_type_id,omitempty"`
    IsDeprecated bool `json:"is_deprecated,omitempty"`
    IsHidden bool `json:"is_hidden,omitempty"`
    SupportClusterTags bool `json:"support_cluster_tags,omitempty"`
    DisplayOrder int32 `json:"display_order,omitempty"`
    NodeInfo *ClusterCloudProviderNodeInfo `json:"node_info,omitempty"`
    NodeInstanceType *NodeInstanceType `json:"node_instance_type,omitempty"`
    PhotonWorkerCapable bool `json:"photon_worker_capable,omitempty"`
    PhotonDriverCapable bool `json:"photon_driver_capable,omitempty"`
}
NodeType encapsulates information about a given node when using the list-node-types api
type NodeTypeList ¶
type NodeTypeList struct {
NodeTypes []NodeType `json:"node_types,omitempty"`
}
NodeTypeList contains a list of node types
type NodeTypeRequest ¶
type NodeTypeRequest struct {
    MinMemoryGB int32 `json:"min_memory_gb,omitempty"`
    GBPerCore int32 `json:"gb_per_core,omitempty"`
    MinCores int32 `json:"min_cores,omitempty"`
    MinGPUs int32 `json:"min_gpus,omitempty"`
    LocalDisk bool `json:"local_disk,omitempty"`
    Category string `json:"category,omitempty"`
    PhotonWorkerCapable bool `json:"photon_worker_capable,omitempty"`
    PhotonDriverCapable bool `json:"photon_driver_capable,omitempty"`
    IsIOCacheEnabled bool `json:"is_io_cache_enabled,omitempty"`
    SupportPortForwarding bool `json:"support_port_forwarding,omitempty"`
}
NodeTypeRequest is a wrapper for local filtering of node types
type NotebookTask ¶
type NotebookTask struct {
    NotebookPath string `json:"notebook_path"`
    BaseParameters map[string]string `json:"base_parameters,omitempty"`
}
NotebookTask contains the information for notebook jobs
type PipelineHealthStatus ¶ added in v0.3.2
type PipelineHealthStatus string
PipelineHealthStatus describes the health status of a pipeline.
const (
    HealthStatusHealthy PipelineHealthStatus = "HEALTHY"
    HealthStatusUnhealthy PipelineHealthStatus = "UNHEALTHY"
)
Constants for PipelineHealthStatus
type PipelineState ¶ added in v0.3.2
type PipelineState string
PipelineState describes the state of a pipeline.
const (
    StateDeploying PipelineState = "DEPLOYING"
    StateStarting PipelineState = "STARTING"
    StateRunning PipelineState = "RUNNING"
    StateStopping PipelineState = "STOPPING"
    StateDeleted PipelineState = "DELETED"
    StateRecovering PipelineState = "RECOVERING"
    StateFailed PipelineState = "FAILED"
    StateResetting PipelineState = "RESETTING"
    StateIdle PipelineState = "IDLE"
)
Constants for PipelineStates
type PipelineTask ¶ added in v0.3.10
type PipelineTask struct {
PipelineID string `json:"pipeline_id"`
}
PipelineTask contains the information for pipeline jobs
type PythonWheelTask ¶ added in v0.3.10
type PythonWheelTask struct {
    EntryPoint string `json:"entry_point,omitempty"`
    PackageName string `json:"package_name,omitempty"`
    Parameters []string `json:"parameters,omitempty"`
    NamedParameters map[string]string `json:"named_parameters,omitempty"`
}
PythonWheelTask contains the information for python wheel jobs
type RunParameters ¶
type RunParameters struct {
    // a shortcut field to reuse this type for RunNow
    JobID int64 `json:"job_id,omitempty"`
    NotebookParams map[string]string `json:"notebook_params,omitempty"`
    JarParams []string `json:"jar_params,omitempty"`
    PythonParams []string `json:"python_params,omitempty"`
    SparkSubmitParams []string `json:"spark_submit_params,omitempty"`
}
RunParameters are the parameters passed to a job run.
type RunState ¶
type RunState struct {
    ResultState string `json:"result_state,omitempty"`
    LifeCycleState string `json:"life_cycle_state,omitempty"`
    StateMessage string `json:"state_message,omitempty"`
}
RunState is the state of a job run.
type S3StorageInfo ¶
type S3StorageInfo struct {
    // TODO: add instance profile validation check + prefix validation
    Destination string `json:"destination"`
    Region string `json:"region,omitempty" tf:"group:location"`
    Endpoint string `json:"endpoint,omitempty" tf:"group:location"`
    EnableEncryption bool `json:"enable_encryption,omitempty"`
    EncryptionType string `json:"encryption_type,omitempty"`
    KmsKey string `json:"kms_key,omitempty"`
    CannedACL string `json:"canned_acl,omitempty"`
}
S3StorageInfo describes the settings for storing files in S3
type SortOrder ¶
type SortOrder string
SortOrder - constants for API https://docs.databricks.com/dev-tools/api/latest/clusters.html#clusterlistorder
type SparkJarTask ¶
type SparkJarTask struct {
    JarURI string `json:"jar_uri,omitempty"`
    MainClassName string `json:"main_class_name,omitempty"`
    Parameters []string `json:"parameters,omitempty"`
}
SparkJarTask contains the information for jar jobs
type SparkNode ¶
type SparkNode struct {
    PrivateIP string `json:"private_ip,omitempty"`
    PublicDNS string `json:"public_dns,omitempty"`
    NodeID string `json:"node_id,omitempty"`
    InstanceID string `json:"instance_id,omitempty"`
    StartTimestamp int64 `json:"start_timestamp,omitempty"`
    NodeAwsAttributes *SparkNodeAwsAttributes `json:"node_aws_attributes,omitempty"`
    HostPrivateIP string `json:"host_private_ip,omitempty"`
}
SparkNode encapsulates all the attributes of a node that is part of a Databricks cluster
type SparkNodeAwsAttributes ¶
type SparkNodeAwsAttributes struct {
IsSpot bool `json:"is_spot,omitempty"`
}
SparkNodeAwsAttributes is the struct that determines if the node is a spot instance or not
type SparkPythonTask ¶
type SparkPythonTask struct {
    PythonFile string `json:"python_file"`
    Parameters []string `json:"parameters,omitempty"`
}
SparkPythonTask contains the information for python jobs
type SparkSubmitTask ¶
type SparkSubmitTask struct {
Parameters []string `json:"parameters,omitempty"`
}
SparkSubmitTask contains the information for spark submit jobs
type SparkVersion ¶
SparkVersion contains information about a specific Spark version
type SparkVersionRequest ¶
type SparkVersionRequest struct {
    LongTermSupport bool `json:"long_term_support,omitempty" tf:"optional,default:false"`
    Beta bool `json:"beta,omitempty" tf:"optional,default:false,conflicts:long_term_support"`
    Latest bool `json:"latest,omitempty" tf:"optional,default:true"`
    ML bool `json:"ml,omitempty" tf:"optional,default:false"`
    Genomics bool `json:"genomics,omitempty" tf:"optional,default:false"`
    GPU bool `json:"gpu,omitempty" tf:"optional,default:false"`
    Scala string `json:"scala,omitempty" tf:"optional,default:2.12"`
    SparkVersion string `json:"spark_version,omitempty" tf:"optional,default:"`
    Photon bool `json:"photon,omitempty" tf:"optional,default:false"`
}
SparkVersionRequest - filtering request
type SparkVersionsList ¶
type SparkVersionsList struct {
SparkVersions []SparkVersion `json:"versions"`
}
SparkVersionsList - returns a list of all currently supported Spark Versions https://docs.databricks.com/dev-tools/api/latest/clusters.html#runtime-versions
func (SparkVersionsList) LatestSparkVersion ¶
func (sparkVersions SparkVersionsList) LatestSparkVersion(req SparkVersionRequest) (string, error)
LatestSparkVersion returns latest version matching the request parameters
type StorageInfo ¶
type StorageInfo struct {
    Dbfs *DbfsStorageInfo `json:"dbfs,omitempty" tf:"group:storage"`
    S3 *S3StorageInfo `json:"s3,omitempty" tf:"group:storage"`
}
StorageInfo contains the struct for either DBFS or S3 storage depending on which one is relevant.
type TaskDependency ¶ added in v0.3.9
type TaskDependency struct {
TaskKey string `json:"task_key,omitempty"`
}
type TerminationReason ¶
type TerminationReason struct {
    Code string `json:"code,omitempty"`
    Type string `json:"type,omitempty"`
    Parameters map[string]string `json:"parameters,omitempty"`
}
TerminationReason encapsulates the termination code and potential parameters
type UpdateJobRequest ¶
type UpdateJobRequest struct {
    JobID int64 `json:"job_id,omitempty" url:"job_id,omitempty"`
    NewSettings *JobSettings `json:"new_settings,omitempty" url:"new_settings,omitempty"`
}
UpdateJobRequest is the request body for updating a job's settings.