Documentation
¶
Index ¶
- Constants
- func CommonEnvironmentClientWithRealCommandExecutor() *common.DatabricksClient
- func CommonInstancePoolID() string
- func DataSourceClusterZones() *schema.Resource
- func DataSourceNodeType() *schema.Resource
- func DataSourceSparkVersion() *schema.Resource
- func ResourceCluster() *schema.Resource
- func ResourceClusterPolicy() *schema.Resource
- func ResourceInstancePool() *schema.Resource
- func ResourceJob() *schema.Resource
- type AutoScale
- type AwsAttributes
- type AwsAvailability
- type AzureDiskVolumeType
- type Cluster
- type ClusterCloudProviderNodeInfo
- type ClusterEvent
- type ClusterEventType
- type ClusterID
- type ClusterInfo
- type ClusterLibraryList
- type ClusterLibraryStatuses
- type ClusterList
- type ClusterPoliciesAPI
- type ClusterPolicy
- type ClusterPolicyCreate
- type ClusterSize
- type ClusterState
- type ClustersAPI
- func (a ClustersAPI) Create(cluster Cluster) (info ClusterInfo, err error)
- func (a ClustersAPI) Edit(cluster Cluster) (info ClusterInfo, err error)
- func (a ClustersAPI) Events(eventsRequest EventsRequest) ([]ClusterEvent, error)
- func (a ClustersAPI) Get(clusterID string) (ci ClusterInfo, err error)
- func (a ClustersAPI) GetOrCreateRunningCluster(name string, custom ...Cluster) (c ClusterInfo, err error)
- func (a ClustersAPI) GetSmallestNodeType(r NodeTypeRequest) string
- func (a ClustersAPI) LatestSparkVersion(svr SparkVersionRequest) (string, error)
- func (a ClustersAPI) LatestSparkVersionOrDefault(svr SparkVersionRequest) string
- func (a ClustersAPI) List() ([]ClusterInfo, error)
- func (a ClustersAPI) ListNodeTypes() (l NodeTypeList, err error)
- func (a ClustersAPI) ListSparkVersions() (SparkVersionsList, error)
- func (a ClustersAPI) ListZones() (ZonesInfo, error)
- func (a ClustersAPI) PermanentDelete(clusterID string) error
- func (a ClustersAPI) Pin(clusterID string) error
- func (a ClustersAPI) Restart(clusterID string) error
- func (a ClustersAPI) Start(clusterID string) error
- func (a ClustersAPI) StartAndGetInfo(clusterID string) (ClusterInfo, error)
- func (a ClustersAPI) Terminate(clusterID string) error
- func (a ClustersAPI) Unpin(clusterID string) error
- type Command
- type CommandResults
- type CommandsAPI
- type Cran
- type CronSchedule
- type DbfsStorageInfo
- type DockerBasicAuth
- type DockerImage
- type EbsVolumeType
- type EventDetails
- type EventsRequest
- type EventsResponse
- type InstancePool
- type InstancePoolAndStats
- type InstancePoolAwsAttributes
- type InstancePoolDiskSpec
- type InstancePoolDiskType
- type InstancePoolList
- type InstancePoolStats
- type InstancePoolsAPI
- func (a InstancePoolsAPI) Create(instancePool InstancePool) (InstancePoolAndStats, error)
- func (a InstancePoolsAPI) Delete(instancePoolID string) error
- func (a InstancePoolsAPI) List() (ipl InstancePoolList, err error)
- func (a InstancePoolsAPI) Read(instancePoolID string) (ip InstancePool, err error)
- func (a InstancePoolsAPI) Update(ip InstancePool) error
- type Job
- type JobEmailNotifications
- type JobSettings
- type JobsAPI
- type LibrariesAPI
- type Library
- type LibraryStatus
- type LogSyncStatus
- type Maven
- type NodeInstanceType
- type NodeType
- type NodeTypeList
- type NodeTypeRequest
- type NotebookTask
- type PyPi
- type ResizeCause
- type S3StorageInfo
- type SortOrder
- type SparkJarTask
- type SparkNode
- type SparkNodeAwsAttributes
- type SparkPythonTask
- type SparkSubmitTask
- type SparkVersion
- type SparkVersionRequest
- type SparkVersionsList
- type StorageInfo
- type TerminationReason
- type UpdateJobRequest
- type ZonesInfo
Constants ¶
const ( // AwsAvailabilitySpot is spot instance type for clusters AwsAvailabilitySpot = "SPOT" // AwsAvailabilityOnDemand is OnDemand instance type for clusters AwsAvailabilityOnDemand = "ON_DEMAND" // AwsAvailabilitySpotWithFallback is Spot instance type for clusters with option // to fallback into on-demand if instance cannot be acquired AwsAvailabilitySpotWithFallback = "SPOT_WITH_FALLBACK" )
const ( // AzureDiskVolumeTypeStandard is for standard local redundant storage AzureDiskVolumeTypeStandard = "STANDARD_LRS" // AzureDiskVolumeTypePremium is for premium local redundant storage AzureDiskVolumeTypePremium = "PREMIUM_LRS" )
const ( // EbsVolumeTypeGeneralPurposeSsd is general purpose ssd (starts at 32 gb) EbsVolumeTypeGeneralPurposeSsd = "GENERAL_PURPOSE_SSD" // EbsVolumeTypeThroughputOptimizedHdd is throughput optimized hdd (starts at 500 gb) EbsVolumeTypeThroughputOptimizedHdd = "THROUGHPUT_OPTIMIZED_HDD" )
const ( // ClusterStatePending Indicates that a cluster is in the process of being created. ClusterStatePending = "PENDING" // ClusterStateRunning Indicates that a cluster has been started and is ready for use. ClusterStateRunning = "RUNNING" // ClusterStateRestarting Indicates that a cluster is in the process of restarting. ClusterStateRestarting = "RESTARTING" // ClusterStateResizing Indicates that a cluster is in the process of adding or removing nodes. ClusterStateResizing = "RESIZING" // ClusterStateTerminating Indicates that a cluster is in the process of being destroyed. ClusterStateTerminating = "TERMINATING" // ClusterStateTerminated Indicates that a cluster has been successfully destroyed. ClusterStateTerminated = "TERMINATED" // ClusterStateError This state is not used anymore. It was used to indicate a cluster // that failed to be created. Terminating and Terminated are used instead. ClusterStateError = "ERROR" // ClusterStateUnknown Indicates that a cluster is in an unknown state. A cluster should never be in this state. ClusterStateUnknown = "UNKNOWN" )
const DefaultProvisionTimeout = 30 * time.Minute
DefaultProvisionTimeout ...
Variables ¶
This section is empty.
Functions ¶
func CommonEnvironmentClientWithRealCommandExecutor ¶
func CommonEnvironmentClientWithRealCommandExecutor() *common.DatabricksClient
CommonEnvironmentClientWithRealCommandExecutor is good for internal tests
func CommonInstancePoolID ¶
func CommonInstancePoolID() string
CommonInstancePoolID returns common instance pool that is supposed to be used for internal testing purposes
func DataSourceClusterZones ¶
DataSourceClusterZones ...
func DataSourceNodeType ¶ added in v0.2.8
DataSourceNodeType returns the smallest node depending on the cloud
func DataSourceSparkVersion ¶ added in v0.3.0
DataSourceSparkVersion returns DBR version matching to the specification
func ResourceCluster ¶
ResourceCluster - returns Cluster resource description
func ResourceClusterPolicy ¶
ResourceClusterPolicy ...
Types ¶
type AutoScale ¶
type AutoScale struct { MinWorkers int32 `json:"min_workers,omitempty"` MaxWorkers int32 `json:"max_workers,omitempty"` }
AutoScale is a struct that describes auto scaling for clusters
type AwsAttributes ¶
type AwsAttributes struct { FirstOnDemand int32 `json:"first_on_demand,omitempty" tf:"computed"` Availability AwsAvailability `json:"availability,omitempty" tf:"computed"` ZoneID string `json:"zone_id,omitempty" tf:"computed"` InstanceProfileArn string `json:"instance_profile_arn,omitempty"` SpotBidPricePercent int32 `json:"spot_bid_price_percent,omitempty" tf:"computed"` EbsVolumeType EbsVolumeType `json:"ebs_volume_type,omitempty" tf:"computed"` EbsVolumeCount int32 `json:"ebs_volume_count,omitempty" tf:"computed"` EbsVolumeSize int32 `json:"ebs_volume_size,omitempty" tf:"computed"` }
AwsAttributes encapsulates the aws attributes for aws based clusters https://docs.databricks.com/dev-tools/api/latest/clusters.html#clusterclusterattributes
type AwsAvailability ¶
type AwsAvailability string
AwsAvailability is a type for describing AWS availability on cluster nodes
type AzureDiskVolumeType ¶
type AzureDiskVolumeType string
AzureDiskVolumeType is disk type on azure vms
type Cluster ¶
type Cluster struct { ClusterID string `json:"cluster_id,omitempty"` ClusterName string `json:"cluster_name,omitempty"` SparkVersion string `json:"spark_version"` // TODO: perhaps make a default NumWorkers int32 `json:"num_workers" tf:"group:size"` Autoscale *AutoScale `json:"autoscale,omitempty" tf:"group:size"` EnableElasticDisk bool `json:"enable_elastic_disk,omitempty" tf:"computed"` EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty"` NodeTypeID string `json:"node_type_id,omitempty" tf:"group:node_type,computed"` DriverNodeTypeID string `json:"driver_node_type_id,omitempty" tf:"conflicts:instance_pool_id,computed"` InstancePoolID string `json:"instance_pool_id,omitempty" tf:"group:node_type"` PolicyID string `json:"policy_id,omitempty"` AwsAttributes *AwsAttributes `json:"aws_attributes,omitempty" tf:"conflicts:instance_pool_id"` AutoterminationMinutes int32 `json:"autotermination_minutes,omitempty"` SparkConf map[string]string `json:"spark_conf,omitempty"` SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"` CustomTags map[string]string `json:"custom_tags,omitempty"` SSHPublicKeys []string `json:"ssh_public_keys,omitempty" tf:"max_items:10"` InitScripts []StorageInfo `json:"init_scripts,omitempty" tf:"max_items:10"` // TODO: tf:alias ClusterLogConf *StorageInfo `json:"cluster_log_conf,omitempty"` DockerImage *DockerImage `json:"docker_image,omitempty"` SingleUserName string `json:"single_user_name,omitempty"` IdempotencyToken string `json:"idempotency_token,omitempty"` }
Cluster contains the information when trying to submit api calls or editing a cluster
type ClusterCloudProviderNodeInfo ¶
type ClusterCloudProviderNodeInfo struct { Status []string `json:"status,omitempty"` AvailableCoreQuota float32 `json:"available_core_quota,omitempty"` TotalCoreQuota float32 `json:"total_core_quota,omitempty"` }
ClusterCloudProviderNodeInfo encapsulates the existing quota available from the cloud service provider.
type ClusterEvent ¶ added in v0.2.8
type ClusterEvent struct { ClusterID string `json:"cluster_id"` Timestamp int64 `json:"timestamp"` Type ClusterEventType `json:"type"` Details EventDetails `json:"details"` }
ClusterEvent - event information https://docs.databricks.com/dev-tools/api/latest/clusters.html#clustereventsclusterevent
type ClusterEventType ¶ added in v0.2.8
type ClusterEventType string
ClusterEventType - constants for API
const ( EvTypeCreating ClusterEventType = "CREATING" EvTypeDidNotExpandDisk ClusterEventType = "DID_NOT_EXPAND_DISK" EvTypeExpandedDisk ClusterEventType = "EXPANDED_DISK" EvTypeFailedToExpandDisk ClusterEventType = "FAILED_TO_EXPAND_DISK" EvTypeInitScriptsStarting ClusterEventType = "INIT_SCRIPTS_STARTING" EvTypeInitScriptsFinished ClusterEventType = "INIT_SCRIPTS_FINISHED" EvTypeStarting ClusterEventType = "STARTING" EvTypeRestarting ClusterEventType = "RESTARTING" EvTypeTerminating ClusterEventType = "TERMINATING" EvTypeEdited ClusterEventType = "EDITED" EvTypeRunning ClusterEventType = "RUNNING" EvTypeResizing ClusterEventType = "RESIZING" EvTypeUpsizeCompleted ClusterEventType = "UPSIZE_COMPLETED" EvTypeNodesLost ClusterEventType = "NODES_LOST" EvTypeDriverHealthy ClusterEventType = "DRIVER_HEALTHY" EvTypeSparkException ClusterEventType = "SPARK_EXCEPTION" EvTypeDriverNotResponding ClusterEventType = "DRIVER_NOT_RESPONDING" EvTypeDbfsDown ClusterEventType = "DBFS_DOWN" EvTypeMetastoreDown ClusterEventType = "METASTORE_DOWN" EvTypeNodeBlacklisted ClusterEventType = "NODE_BLACKLISTED" EvTypePinned ClusterEventType = "PINNED" EvTypeUnpinned ClusterEventType = "UNPINNED" )
Constants for Event Types
type ClusterID ¶
type ClusterID struct {
ClusterID string `json:"cluster_id,omitempty" url:"cluster_id,omitempty"`
}
ClusterID holds cluster ID
type ClusterInfo ¶
type ClusterInfo struct { NumWorkers int32 `json:"num_workers,omitempty"` AutoScale *AutoScale `json:"autoscale,omitempty"` ClusterID string `json:"cluster_id,omitempty"` CreatorUserName string `json:"creator_user_name,omitempty"` Driver *SparkNode `json:"driver,omitempty"` Executors []SparkNode `json:"executors,omitempty"` SparkContextID int64 `json:"spark_context_id,omitempty"` JdbcPort int32 `json:"jdbc_port,omitempty"` ClusterName string `json:"cluster_name,omitempty"` SparkVersion string `json:"spark_version"` SparkConf map[string]string `json:"spark_conf,omitempty"` AwsAttributes *AwsAttributes `json:"aws_attributes,omitempty"` NodeTypeID string `json:"node_type_id,omitempty"` DriverNodeTypeID string `json:"driver_node_type_id,omitempty"` SSHPublicKeys []string `json:"ssh_public_keys,omitempty"` CustomTags map[string]string `json:"custom_tags,omitempty"` ClusterLogConf *StorageInfo `json:"cluster_log_conf,omitempty"` InitScripts []StorageInfo `json:"init_scripts,omitempty"` SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"` AutoterminationMinutes int32 `json:"autotermination_minutes,omitempty"` EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"` EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty"` InstancePoolID string `json:"instance_pool_id,omitempty"` PolicyID string `json:"policy_id,omitempty"` SingleUserName string `json:"single_user_name,omitempty"` ClusterSource AwsAvailability `json:"cluster_source,omitempty"` DockerImage *DockerImage `json:"docker_image,omitempty"` State ClusterState `json:"state"` StateMessage string `json:"state_message,omitempty"` StartTime int64 `json:"start_time,omitempty"` TerminateTime int64 `json:"terminate_time,omitempty"` LastStateLossTime int64 `json:"last_state_loss_time,omitempty"` LastActivityTime int64 `json:"last_activity_time,omitempty"` ClusterMemoryMb int64 `json:"cluster_memory_mb,omitempty"` ClusterCores float32 `json:"cluster_cores,omitempty"` DefaultTags map[string]string `json:"default_tags"` ClusterLogStatus *LogSyncStatus `json:"cluster_log_status,omitempty"` TerminationReason *TerminationReason `json:"termination_reason,omitempty"` }
ClusterInfo contains the information when getting cluster info from the get request.
func NewTinyClusterInCommonPool ¶
func NewTinyClusterInCommonPool() (c ClusterInfo, err error)
NewTinyClusterInCommonPool creates new cluster for short-lived purposes
func NewTinyClusterInCommonPoolPossiblyReused ¶
func NewTinyClusterInCommonPoolPossiblyReused() (c ClusterInfo)
NewTinyClusterInCommonPoolPossiblyReused is recommended to be used for testing only
func (*ClusterInfo) IsRunningOrResizing ¶
func (ci *ClusterInfo) IsRunningOrResizing() bool
IsRunningOrResizing returns true if cluster is running or resizing
type ClusterLibraryList ¶
type ClusterLibraryList struct { ClusterID string `json:"cluster_id,omitempty" url:"cluster_id,omitempty"` Libraries []Library `json:"libraries,omitempty" url:"libraries,omitempty" tf:"slice_set,alias:library"` }
ClusterLibraryList is request body for install and uninstall
func (*ClusterLibraryList) Diff ¶
func (cll *ClusterLibraryList) Diff(cls ClusterLibraryStatuses) (ClusterLibraryList, ClusterLibraryList)
Diff returns install/uninstall lists given a cluster lib status
type ClusterLibraryStatuses ¶
type ClusterLibraryStatuses struct { ClusterID string `json:"cluster_id,omitempty"` LibraryStatuses []LibraryStatus `json:"library_statuses,omitempty"` }
ClusterLibraryStatuses A status will be available for all libraries installed on the cluster via the API or the libraries UI as well as libraries set to be installed on all clusters via the libraries UI. If a library has been set to be installed on all clusters, is_library_for_all_clusters will be true, even if the library was also installed on the cluster.
func (ClusterLibraryStatuses) IsRetryNeeded ¶
func (cls ClusterLibraryStatuses) IsRetryNeeded() (bool, error)
IsRetryNeeded returns first bool if there needs to be retry. If there needs to be retry, error message will explain why. If retry does not need to happen and error is not nil - it failed.
func (ClusterLibraryStatuses) ToLibraryList ¶
func (cls ClusterLibraryStatuses) ToLibraryList() ClusterLibraryList
ToLibraryList converts to an entity for convenient comparison
type ClusterList ¶ added in v0.2.5
type ClusterList struct {
Clusters []ClusterInfo `json:"clusters,omitempty"`
}
ClusterList shows existing clusters
type ClusterPoliciesAPI ¶
type ClusterPoliciesAPI struct {
// contains filtered or unexported fields
}
ClusterPoliciesAPI struct for cluster policies API
func NewClusterPoliciesAPI ¶
func NewClusterPoliciesAPI(ctx context.Context, m interface{}) ClusterPoliciesAPI
NewClusterPoliciesAPI creates ClusterPoliciesAPI instance from provider meta. Creation and editing is available to admins only.
func (ClusterPoliciesAPI) Create ¶
func (a ClusterPoliciesAPI) Create(clusterPolicy *ClusterPolicy) error
Create creates new cluster policy and sets PolicyID
func (ClusterPoliciesAPI) Delete ¶
func (a ClusterPoliciesAPI) Delete(policyID string) error
Delete removes cluster policy
func (ClusterPoliciesAPI) Edit ¶
func (a ClusterPoliciesAPI) Edit(clusterPolicy *ClusterPolicy) error
Edit will update an existing policy. This may make some clusters governed by this policy invalid. For such clusters the next cluster edit must provide a confirming configuration, but otherwise they can continue to run.
func (ClusterPoliciesAPI) Get ¶
func (a ClusterPoliciesAPI) Get(policyID string) (policy ClusterPolicy, err error)
Get returns cluster policy
type ClusterPolicy ¶
type ClusterPolicy struct { PolicyID string `json:"policy_id,omitempty"` Name string `json:"name"` Definition string `json:"definition"` CreatedAtTimeStamp int64 `json:"created_at_timestamp"` }
ClusterPolicy defines cluster policy
type ClusterPolicyCreate ¶
ClusterPolicyCreate is the entity used for the request
type ClusterSize ¶ added in v0.2.8
type ClusterSize struct { NumWorkers int32 `json:"num_workers"` AutoScale *AutoScale `json:"autoscale"` }
ClusterSize is structure to keep https://docs.databricks.com/dev-tools/api/latest/clusters.html#clusterclustersize
type ClusterState ¶
type ClusterState string
ClusterState is for describing possible cluster states
func (ClusterState) CanReach ¶
func (state ClusterState) CanReach(desired ClusterState) bool
CanReach returns true if cluster state can reach desired state
type ClustersAPI ¶
type ClustersAPI struct {
// contains filtered or unexported fields
}
ClustersAPI is a struct that contains the Databricks api client to perform queries
func NewClustersAPI ¶
func NewClustersAPI(ctx context.Context, m interface{}) ClustersAPI
NewClustersAPI creates ClustersAPI instance from provider meta
func (ClustersAPI) Create ¶
func (a ClustersAPI) Create(cluster Cluster) (info ClusterInfo, err error)
Create creates a new Spark cluster and waits till it's running
func (ClustersAPI) Edit ¶
func (a ClustersAPI) Edit(cluster Cluster) (info ClusterInfo, err error)
Edit edits the configuration of a cluster to match the provided attributes and size
func (ClustersAPI) Events ¶ added in v0.2.8
func (a ClustersAPI) Events(eventsRequest EventsRequest) ([]ClusterEvent, error)
Events - only using Cluster ID string to get all events https://docs.databricks.com/dev-tools/api/latest/clusters.html#events
func (ClustersAPI) Get ¶
func (a ClustersAPI) Get(clusterID string) (ci ClusterInfo, err error)
Get retrieves the information for a cluster given its identifier
func (ClustersAPI) GetOrCreateRunningCluster ¶
func (a ClustersAPI) GetOrCreateRunningCluster(name string, custom ...Cluster) (c ClusterInfo, err error)
GetOrCreateRunningCluster creates an autoterminating cluster if it doesn't exist
func (ClustersAPI) GetSmallestNodeType ¶ added in v0.2.8
func (a ClustersAPI) GetSmallestNodeType(r NodeTypeRequest) string
GetSmallestNodeType returns smallest (or default) node type id given the criteria
func (ClustersAPI) LatestSparkVersion ¶ added in v0.3.0
func (a ClustersAPI) LatestSparkVersion(svr SparkVersionRequest) (string, error)
LatestSparkVersion returns latest version matching the request parameters
func (ClustersAPI) LatestSparkVersionOrDefault ¶ added in v0.3.0
func (a ClustersAPI) LatestSparkVersionOrDefault(svr SparkVersionRequest) string
LatestSparkVersionOrDefault returns Spark version matching the definition, or default in case of error
func (ClustersAPI) List ¶
func (a ClustersAPI) List() ([]ClusterInfo, error)
List return information about all pinned clusters, currently active clusters, up to 70 of the most recently terminated interactive clusters in the past 30 days, and up to 30 of the most recently terminated job clusters in the past 30 days
func (ClustersAPI) ListNodeTypes ¶
func (a ClustersAPI) ListNodeTypes() (l NodeTypeList, err error)
ListNodeTypes returns a sorted list of supported Spark node types
func (ClustersAPI) ListSparkVersions ¶ added in v0.3.0
func (a ClustersAPI) ListSparkVersions() (SparkVersionsList, error)
ListSparkVersions returns the list of all supported Spark versions
func (ClustersAPI) ListZones ¶
func (a ClustersAPI) ListZones() (ZonesInfo, error)
ListZones returns the zones info sent by the cloud service provider
func (ClustersAPI) PermanentDelete ¶
func (a ClustersAPI) PermanentDelete(clusterID string) error
PermanentDelete permanently deletes a cluster
func (ClustersAPI) Pin ¶
func (a ClustersAPI) Pin(clusterID string) error
Pin ensure that an interactive cluster configuration is retained even after a cluster has been terminated for more than 30 days
func (ClustersAPI) Restart ¶
func (a ClustersAPI) Restart(clusterID string) error
Restart restarts a Spark cluster given its ID. If the cluster is not in a RUNNING state, nothing will happen.
func (ClustersAPI) Start ¶
func (a ClustersAPI) Start(clusterID string) error
Start a terminated Spark cluster given its ID and wait till it's running
func (ClustersAPI) StartAndGetInfo ¶
func (a ClustersAPI) StartAndGetInfo(clusterID string) (ClusterInfo, error)
StartAndGetInfo starts cluster and returns info
func (ClustersAPI) Terminate ¶
func (a ClustersAPI) Terminate(clusterID string) error
Terminate terminates a Spark cluster given its ID
func (ClustersAPI) Unpin ¶
func (a ClustersAPI) Unpin(clusterID string) error
Unpin allows the cluster to eventually be removed from the list returned by the List API
type Command ¶
type Command struct { ID string `json:"id,omitempty"` Status string `json:"status,omitempty"` Results *CommandResults `json:"results,omitempty"` }
Command is the struct that contains what the 1.2 api returns for the commands api
type CommandResults ¶
type CommandResults struct { ResultType string `json:"resultType,omitempty"` Summary string `json:"summary,omitempty"` Cause string `json:"cause,omitempty"` Data interface{} `json:"data,omitempty"` Schema interface{} `json:"schema,omitempty"` Truncated bool `json:"truncated,omitempty"` IsJSONSchema bool `json:"isJsonSchema,omitempty"` }
CommandResults is the output when the command finishes in API 1.2
type CommandsAPI ¶
type CommandsAPI struct {
// contains filtered or unexported fields
}
CommandsAPI exposes the Context & Commands API
func NewCommandsAPI ¶
func NewCommandsAPI(ctx context.Context, m interface{}) CommandsAPI
NewCommandsAPI creates CommandsAPI instance from provider meta
type CronSchedule ¶
type CronSchedule struct { QuartzCronExpression string `json:"quartz_cron_expression"` TimezoneID string `json:"timezone_id"` PauseStatus string `json:"pause_status,omitempty" tf:"computed"` }
CronSchedule contains the information for the quartz cron expression
type DbfsStorageInfo ¶
type DbfsStorageInfo struct {
Destination string `json:"destination"`
}
DbfsStorageInfo contains the destination string for DBFS
type DockerBasicAuth ¶
DockerBasicAuth contains the auth information when fetching containers
type DockerImage ¶
type DockerImage struct { URL string `json:"url"` BasicAuth *DockerBasicAuth `json:"basic_auth,omitempty"` }
DockerImage contains the image url and the auth for DCS
type EventDetails ¶ added in v0.2.8
type EventDetails struct { CurrentNumWorkers int32 `json:"current_num_workers,omitempty"` TargetNumWorkers int32 `json:"target_num_workers,omitempty"` PreviousAttributes *AwsAttributes `json:"previous_attributes,omitempty"` Attributes *AwsAttributes `json:"attributes,omitempty"` PreviousClusterSize *ClusterSize `json:"previous_cluster_size,omitempty"` ClusterSize *ClusterSize `json:"cluster_size,omitempty"` ResizeCause *ResizeCause `json:"cause,omitempty"` Reason *TerminationReason `json:"reason,omitempty"` User string `json:"user"` }
EventDetails - details about specific events https://docs.databricks.com/dev-tools/api/latest/clusters.html#clustereventseventdetails
type EventsRequest ¶ added in v0.2.8
type EventsRequest struct { ClusterID string `json:"cluster_id"` StartTime int64 `json:"start_time,omitempty"` EndTime int64 `json:"end_time,omitempty"` Order SortOrder `json:"order,omitempty"` EventTypes []ClusterEventType `json:"event_types,omitempty"` Offset int64 `json:"offset,omitempty"` Limit int64 `json:"limit,omitempty"` MaxItems uint `json:"-"` }
EventsRequest - request structure https://docs.databricks.com/dev-tools/api/latest/clusters.html#request-structure
type EventsResponse ¶ added in v0.2.8
type EventsResponse struct { Events []ClusterEvent `json:"events"` NextPage *EventsRequest `json:"next_page"` TotalCount int64 `json:"total_count"` }
EventsResponse - answer from API https://docs.databricks.com/dev-tools/api/latest/clusters.html#response-structure
type InstancePool ¶
type InstancePool struct { InstancePoolID string `json:"instance_pool_id,omitempty" tf:"computed"` InstancePoolName string `json:"instance_pool_name"` MinIdleInstances int32 `json:"min_idle_instances,omitempty"` MaxCapacity int32 `json:"max_capacity,omitempty"` IdleInstanceAutoTerminationMinutes int32 `json:"idle_instance_autotermination_minutes"` AwsAttributes *InstancePoolAwsAttributes `json:"aws_attributes,omitempty"` NodeTypeID string `json:"node_type_id"` CustomTags map[string]string `json:"custom_tags,omitempty"` EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"` DiskSpec *InstancePoolDiskSpec `json:"disk_spec,omitempty"` PreloadedSparkVersions []string `json:"preloaded_spark_versions,omitempty"` }
InstancePool describes the instance pool object on Databricks
type InstancePoolAndStats ¶
type InstancePoolAndStats struct { InstancePoolID string `json:"instance_pool_id,omitempty" tf:"computed"` InstancePoolName string `json:"instance_pool_name"` MinIdleInstances int32 `json:"min_idle_instances,omitempty"` MaxCapacity int32 `json:"max_capacity,omitempty"` AwsAttributes *InstancePoolAwsAttributes `json:"aws_attributes,omitempty"` NodeTypeID string `json:"node_type_id"` DefaultTags map[string]string `json:"default_tags,omitempty" tf:"computed"` CustomTags map[string]string `json:"custom_tags,omitempty"` IdleInstanceAutoTerminationMinutes int32 `json:"idle_instance_autotermination_minutes"` EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"` DiskSpec *InstancePoolDiskSpec `json:"disk_spec,omitempty"` PreloadedSparkVersions []string `json:"preloaded_spark_versions,omitempty"` State string `json:"state,omitempty"` Stats *InstancePoolStats `json:"stats,omitempty"` }
InstancePoolAndStats encapsulates a get response from the GET api for instance pools on Databricks
type InstancePoolAwsAttributes ¶
type InstancePoolAwsAttributes struct { Availability AwsAvailability `json:"availability,omitempty"` ZoneID string `json:"zone_id"` SpotBidPricePercent int32 `json:"spot_bid_price_percent,omitempty"` }
InstancePoolAwsAttributes contains aws attributes for AWS Databricks deployments for instance pools
type InstancePoolDiskSpec ¶
type InstancePoolDiskSpec struct { DiskType *InstancePoolDiskType `json:"disk_type,omitempty"` DiskCount int32 `json:"disk_count,omitempty"` DiskSize int32 `json:"disk_size,omitempty"` }
InstancePoolDiskSpec contains disk size, type and count information for the pool
type InstancePoolDiskType ¶
type InstancePoolDiskType struct { AzureDiskVolumeType string `json:"azure_disk_volume_type,omitempty"` EbsVolumeType string `json:"ebs_volume_type,omitempty"` }
InstancePoolDiskType contains disk type information for each of the different cloud service providers
type InstancePoolList ¶
type InstancePoolList struct {
InstancePools []InstancePoolAndStats `json:"instance_pools"`
}
InstancePoolList shows list of instance pools
type InstancePoolStats ¶
type InstancePoolStats struct { UsedCount int32 `json:"used_count,omitempty"` IdleCount int32 `json:"idle_count,omitempty"` PendingUsedCount int32 `json:"pending_used_count,omitempty"` PendingIdleCount int32 `json:"pending_idle_count,omitempty"` }
InstancePoolStats contains the stats on a given pool
type InstancePoolsAPI ¶
type InstancePoolsAPI struct {
// contains filtered or unexported fields
}
InstancePoolsAPI exposes the instance pools api
func NewInstancePoolsAPI ¶
func NewInstancePoolsAPI(ctx context.Context, m interface{}) InstancePoolsAPI
NewInstancePoolsAPI creates InstancePoolsAPI instance from provider meta
func (InstancePoolsAPI) Create ¶
func (a InstancePoolsAPI) Create(instancePool InstancePool) (InstancePoolAndStats, error)
Create creates the instance pool to given the instance pool configuration
func (InstancePoolsAPI) Delete ¶
func (a InstancePoolsAPI) Delete(instancePoolID string) error
Delete terminates an instance pool given its ID
func (InstancePoolsAPI) List ¶
func (a InstancePoolsAPI) List() (ipl InstancePoolList, err error)
List retrieves the list of existing instance pools
func (InstancePoolsAPI) Read ¶
func (a InstancePoolsAPI) Read(instancePoolID string) (ip InstancePool, err error)
Read retrieves the information for an instance pool given its identifier
func (InstancePoolsAPI) Update ¶
func (a InstancePoolsAPI) Update(ip InstancePool) error
Update edits the configuration of an instance pool to match the provided attributes and size
type Job ¶
type Job struct { JobID int64 `json:"job_id,omitempty"` CreatorUserName string `json:"creator_user_name,omitempty"` Settings *JobSettings `json:"settings,omitempty"` CreatedTime int64 `json:"created_time,omitempty"` }
Job contains the information when using a GET request from the Databricks Jobs api
type JobEmailNotifications ¶
type JobEmailNotifications struct { OnStart []string `json:"on_start,omitempty"` OnSuccess []string `json:"on_success,omitempty"` OnFailure []string `json:"on_failure,omitempty"` NoAlertForSkippedRuns bool `json:"no_alert_for_skipped_runs,omitempty"` }
JobEmailNotifications contains the information for email notifications after job completion
type JobSettings ¶
type JobSettings struct { Name string `json:"name,omitempty" tf:"default:Untitled"` ExistingClusterID string `json:"existing_cluster_id,omitempty" tf:"group:cluster_type"` NewCluster *Cluster `json:"new_cluster,omitempty" tf:"group:cluster_type"` NotebookTask *NotebookTask `json:"notebook_task,omitempty" tf:"group:task_type"` SparkJarTask *SparkJarTask `json:"spark_jar_task,omitempty" tf:"group:task_type"` SparkPythonTask *SparkPythonTask `json:"spark_python_task,omitempty" tf:"group:task_type"` SparkSubmitTask *SparkSubmitTask `json:"spark_submit_task,omitempty" tf:"group:task_type"` Libraries []Library `json:"libraries,omitempty" tf:"slice_set,alias:library"` TimeoutSeconds int32 `json:"timeout_seconds,omitempty"` MaxRetries int32 `json:"max_retries,omitempty"` MinRetryIntervalMillis int32 `json:"min_retry_interval_millis,omitempty"` RetryOnTimeout bool `json:"retry_on_timeout,omitempty"` Schedule *CronSchedule `json:"schedule,omitempty"` MaxConcurrentRuns int32 `json:"max_concurrent_runs,omitempty"` EmailNotifications *JobEmailNotifications `json:"email_notifications,omitempty"` }
JobSettings contains the information for configuring a job on databricks
type JobsAPI ¶
type JobsAPI struct {
// contains filtered or unexported fields
}
JobsAPI exposes the Jobs API
func NewJobsAPI ¶
NewJobsAPI creates JobsAPI instance from provider meta
func (JobsAPI) Create ¶
func (a JobsAPI) Create(jobSettings JobSettings) (Job, error)
Create creates a job on the workspace given the job settings
type LibrariesAPI ¶
type LibrariesAPI struct {
// contains filtered or unexported fields
}
LibrariesAPI exposes the Library API
func NewLibrariesAPI ¶
func NewLibrariesAPI(ctx context.Context, m interface{}) LibrariesAPI
NewLibrariesAPI creates LibrariesAPI instance from provider meta
func (LibrariesAPI) ClusterStatus ¶
func (a LibrariesAPI) ClusterStatus(clusterID string) (cls ClusterLibraryStatuses, err error)
ClusterStatus returns library status in cluster
func (LibrariesAPI) Install ¶
func (a LibrariesAPI) Install(req ClusterLibraryList) error
Install library list on cluster
func (LibrariesAPI) Uninstall ¶
func (a LibrariesAPI) Uninstall(req ClusterLibraryList) error
Uninstall library list from cluster
type Library ¶
type Library struct { Jar string `json:"jar,omitempty" tf:"group:lib"` Egg string `json:"egg,omitempty" tf:"group:lib"` // TODO: add name validation for wheel libraries. Whl string `json:"whl,omitempty" tf:"group:lib"` Pypi *PyPi `json:"pypi,omitempty" tf:"group:lib"` Maven *Maven `json:"maven,omitempty" tf:"group:lib"` Cran *Cran `json:"cran,omitempty" tf:"group:lib"` }
Library is a construct that contains information about the location of the library and how to download it
func (Library) TypeAndKey ¶
TypeAndKey can be used for computing differences
type LibraryStatus ¶
type LibraryStatus struct { Library *Library `json:"library,omitempty"` Status string `json:"status,omitempty"` IsLibraryInstalledOnAllClusters bool `json:"is_library_for_all_clusters,omitempty"` Messages []string `json:"messages,omitempty"` }
LibraryStatus is the status of a library on a given cluster when using the libraries status API
type LogSyncStatus ¶
type LogSyncStatus struct { LastAttempted int64 `json:"last_attempted,omitempty"` LastException string `json:"last_exception,omitempty"` }
LogSyncStatus encapsulates when the cluster logs were last delivered.
type Maven ¶
type Maven struct { Coordinates string `json:"coordinates"` Repo string `json:"repo,omitempty"` Exclusions []string `json:"exclusions,omitempty"` }
Maven is a jar library hosted on Maven
type NodeInstanceType ¶
type NodeInstanceType struct { InstanceTypeID string `json:"instance_type_id,omitempty"` LocalDisks int32 `json:"local_disks,omitempty"` LocalDiskSizeGB int32 `json:"local_disk_size_gb,omitempty"` LocalNVMeDisks int32 `json:"local_nvme_disks,omitempty"` LocalNVMeDiskSizeGB int32 `json:"local_nvme_disk_size_gb,omitempty"` }
NodeInstanceType encapsulates information about a specific node type
type NodeType ¶
type NodeType struct { NodeTypeID string `json:"node_type_id,omitempty"` MemoryMB int32 `json:"memory_mb,omitempty"` NumCores float32 `json:"num_cores,omitempty"` NumGPUs int32 `json:"num_gpus,omitempty"` SupportEBSVolumes bool `json:"support_ebs_volumes,omitempty"` IsIOCacheEnabled bool `json:"is_io_cache_enabled,omitempty"` SupportPortForwarding bool `json:"support_port_forwarding,omitempty"` Description string `json:"description,omitempty"` Category string `json:"category,omitempty"` InstanceTypeID string `json:"instance_type_id,omitempty"` IsDeprecated bool `json:"is_deprecated,omitempty"` IsHidden bool `json:"is_hidden,omitempty"` SupportClusterTags bool `json:"support_cluster_tags,omitempty"` DisplayOrder int32 `json:"display_order,omitempty"` NodeInfo *ClusterCloudProviderNodeInfo `json:"node_info,omitempty"` NodeInstanceType *NodeInstanceType `json:"node_instance_type,omitempty"` }
NodeType encapsulates information about a given node when using the list-node-types api
type NodeTypeList ¶
type NodeTypeList struct {
NodeTypes []NodeType `json:"node_types,omitempty"`
}
NodeTypeList contains a list of node types
func (*NodeTypeList) Sort ¶ added in v0.2.8
func (l *NodeTypeList) Sort()
Sort NodeTypes within this struct
type NodeTypeRequest ¶ added in v0.2.8
type NodeTypeRequest struct { MinMemoryGB int32 `json:"min_memory_gb,omitempty"` GBPerCore int32 `json:"gb_per_core,omitempty"` MinCores int32 `json:"min_cores,omitempty"` MinGPUs int32 `json:"min_gpus,omitempty"` LocalDisk bool `json:"local_disk,omitempty"` Category string `json:"category,omitempty"` }
NodeTypeRequest is a wrapper for local filtering of node types
type NotebookTask ¶
type NotebookTask struct { NotebookPath string `json:"notebook_path"` BaseParameters map[string]string `json:"base_parameters,omitempty"` }
NotebookTask contains the information for notebook jobs
type S3StorageInfo ¶
type S3StorageInfo struct { // TODO: add instance profile validation check + prefix validation Destination string `json:"destination"` Region string `json:"region,omitempty" tf:"group:location"` Endpoint string `json:"endpoint,omitempty" tf:"group:location"` EnableEncryption bool `json:"enable_encryption,omitempty"` EncryptionType string `json:"encryption_type,omitempty"` KmsKey string `json:"kms_key,omitempty"` CannedACL string `json:"canned_acl,omitempty"` }
S3StorageInfo contains the configuration used when storing files in S3
type SortOrder ¶ added in v0.2.8
type SortOrder string
SortOrder - constants for API https://docs.databricks.com/dev-tools/api/latest/clusters.html#clusterlistorder
type SparkJarTask ¶
type SparkJarTask struct { JarURI string `json:"jar_uri,omitempty"` MainClassName string `json:"main_class_name,omitempty"` Parameters []string `json:"parameters,omitempty"` }
SparkJarTask contains the information for jar jobs
type SparkNode ¶
type SparkNode struct { PrivateIP string `json:"private_ip,omitempty"` PublicDNS string `json:"public_dns,omitempty"` NodeID string `json:"node_id,omitempty"` InstanceID string `json:"instance_id,omitempty"` StartTimestamp int64 `json:"start_timestamp,omitempty"` NodeAwsAttributes *SparkNodeAwsAttributes `json:"node_aws_attributes,omitempty"` HostPrivateIP string `json:"host_private_ip,omitempty"` }
SparkNode encapsulates all the attributes of a node that is part of a databricks cluster
type SparkNodeAwsAttributes ¶
type SparkNodeAwsAttributes struct {
IsSpot bool `json:"is_spot,omitempty"`
}
SparkNodeAwsAttributes is the struct that determines if the node is a spot instance or not
type SparkPythonTask ¶
type SparkPythonTask struct { PythonFile string `json:"python_file"` Parameters []string `json:"parameters,omitempty"` }
SparkPythonTask contains the information for python jobs
type SparkSubmitTask ¶
type SparkSubmitTask struct {
Parameters []string `json:"parameters,omitempty"`
}
SparkSubmitTask contains the information for spark submit jobs
type SparkVersion ¶ added in v0.3.0
SparkVersion - contains information about specific version
type SparkVersionRequest ¶ added in v0.3.0
type SparkVersionRequest struct { LongTermSupport bool `json:"long_term_support,omitempty" tf:"optional,default:false"` Beta bool `json:"beta,omitempty" tf:"optional,default:false,conflicts:long_term_support"` Latest bool `json:"latest,omitempty" tf:"optional,default:true"` ML bool `json:"ml,omitempty" tf:"optional,default:false"` Genomics bool `json:"genomics,omitempty" tf:"optional,default:false"` GPU bool `json:"gpu,omitempty" tf:"optional,default:false"` Scala string `json:"scala,omitempty" tf:"optional,default:2.12"` SparkVersion string `json:"spark_version,omitempty" tf:"optional,default:"` }
SparkVersionRequest - filtering request
type SparkVersionsList ¶ added in v0.3.0
type SparkVersionsList struct {
SparkVersions []SparkVersion `json:"versions"`
}
SparkVersionsList - returns a list of all currently supported Spark Versions https://docs.databricks.com/dev-tools/api/latest/clusters.html#runtime-versions
func (SparkVersionsList) LatestSparkVersion ¶ added in v0.3.0
func (sparkVersions SparkVersionsList) LatestSparkVersion(req SparkVersionRequest) (string, error)
LatestSparkVersion returns latest version matching the request parameters
type StorageInfo ¶
type StorageInfo struct { Dbfs *DbfsStorageInfo `json:"dbfs,omitempty" tf:"group:storage"` S3 *S3StorageInfo `json:"s3,omitempty" tf:"group:storage"` }
StorageInfo describes either DBFS or S3 storage, depending on which one is relevant.
type TerminationReason ¶
type TerminationReason struct { Code string `json:"code,omitempty"` Type string `json:"type,omitempty"` Parameters map[string]string `json:"parameters,omitempty"` }
TerminationReason encapsulates the termination code and potential parameters
type UpdateJobRequest ¶
type UpdateJobRequest struct { JobID int64 `json:"job_id,omitempty" url:"job_id,omitempty"` NewSettings *JobSettings `json:"new_settings,omitempty" url:"new_settings,omitempty"` }
UpdateJobRequest contains the job ID and the new settings to apply when updating an existing job