sql

package
v0.13.0 (latest)
Warning

This package is not in the latest version of its module.

Go to latest
Published: Jul 18, 2023 License: Apache-2.0 Imports: 7 Imported by: 17

Documentation

Overview

These APIs allow you to manage Alerts, Dashboards, Data Sources, Dbsql Permissions, Queries, Query History, Statement Execution, Warehouses, etc.

Index

Examples

Constants

This section is empty.

Variables

This section is empty.

Functions

This section is empty.

Types

type AccessControl

// AccessControl is a single access-control entry on a Databricks SQL object,
// granting a permission level to either a group or a user.
type AccessControl struct {
	// Group this entry applies to, if the subject is a group.
	GroupName string `json:"group_name,omitempty"`
	// Permission level granted to the subject.
	PermissionLevel PermissionLevel `json:"permission_level,omitempty"`

	// User this entry applies to, if the subject is a user.
	UserName string `json:"user_name,omitempty"`
}

type Alert

// Alert is a Databricks SQL alert: a query plus a trigger condition that is
// periodically evaluated.
type Alert struct {
	// Timestamp when the alert was created.
	CreatedAt string `json:"created_at,omitempty"`
	// Alert ID.
	Id string `json:"id,omitempty"`
	// Timestamp when the alert was last triggered.
	LastTriggeredAt string `json:"last_triggered_at,omitempty"`
	// Name of the alert.
	Name string `json:"name,omitempty"`
	// Alert configuration options.
	Options *AlertOptions `json:"options,omitempty"`
	// The identifier of the workspace folder containing the object.
	Parent string `json:"parent,omitempty"`

	// Query this alert evaluates.
	Query *AlertQuery `json:"query,omitempty"`
	// Number of seconds after being triggered before the alert rearms itself
	// and can be triggered again. If `null`, alert will never be triggered
	// again.
	Rearm int `json:"rearm,omitempty"`
	// State of the alert. Possible values are: `unknown` (yet to be evaluated),
	// `triggered` (evaluated and fulfilled trigger conditions), or `ok`
	// (evaluated and did not fulfill trigger conditions).
	State AlertState `json:"state,omitempty"`
	// Timestamp when the alert was last updated.
	UpdatedAt string `json:"updated_at,omitempty"`

	// User associated with the alert; presumably its owner — confirm against
	// the REST API docs.
	User *User `json:"user,omitempty"`
}

type AlertOptions

// AlertOptions holds the evaluation and notification configuration options
// for an alert.
type AlertOptions struct {
	// Name of column in the query result to compare in alert evaluation.
	Column string `json:"column"`
	// Custom body of alert notification, if it exists. See [here] for custom
	// templating instructions.
	//
	// [here]: https://docs.databricks.com/sql/user/alerts/index.html
	CustomBody string `json:"custom_body,omitempty"`
	// Custom subject of alert notification, if it exists. This includes email
	// subject, Slack notification header, etc. See [here] for custom templating
	// instructions.
	//
	// [here]: https://docs.databricks.com/sql/user/alerts/index.html
	CustomSubject string `json:"custom_subject,omitempty"`
	// Whether or not the alert is muted. If an alert is muted, it will not
	// notify users and notification destinations when triggered.
	Muted bool `json:"muted,omitempty"`
	// Operator used to compare in alert evaluation: `>`, `>=`, `<`, `<=`, `==`,
	// `!=`
	Op string `json:"op"`
	// Value used to compare in alert evaluation.
	Value any `json:"value"`
}

Alert configuration options.

type AlertQuery added in v0.13.0

// AlertQuery is the query object attached to an alert (see Alert.Query).
type AlertQuery struct {
	// The timestamp when this query was created.
	CreatedAt string `json:"created_at,omitempty"`
	// Data source ID.
	DataSourceId string `json:"data_source_id,omitempty"`
	// General description that conveys additional information about this query
	// such as usage notes.
	Description string `json:"description,omitempty"`
	// Query ID.
	Id string `json:"id,omitempty"`
	// Indicates whether the query is trashed. Trashed queries can't be used in
	// dashboards, or appear in search results. If this boolean is `true`, the
	// `options` property for this query includes a `moved_to_trash_at`
	// timestamp. Trashed queries are permanently deleted after 30 days.
	IsArchived bool `json:"is_archived,omitempty"`
	// Whether the query is a draft. Draft queries only appear in list views for
	// their owners. Visualizations from draft queries cannot appear on
	// dashboards.
	IsDraft bool `json:"is_draft,omitempty"`
	// Text parameter types are not safe from SQL injection for all types of
	// data source. Set this Boolean parameter to `true` if a query either does
	// not use any text type parameters or uses a data source type where text
	// type parameters are handled safely.
	IsSafe bool `json:"is_safe,omitempty"`
	// The title of this query that appears in list views, widget headings, and
	// on the query page.
	Name string `json:"name,omitempty"`

	// Additional query options.
	Options *QueryOptions `json:"options,omitempty"`
	// The text of the query to be run.
	Query string `json:"query,omitempty"`

	// Tags attached to the query.
	Tags []string `json:"tags,omitempty"`
	// The timestamp at which this query was last updated.
	UpdatedAt string `json:"updated_at,omitempty"`
	// The ID of the user who created this query.
	UserId int `json:"user_id,omitempty"`
}

type AlertState

// AlertState is the evaluation state of an alert: `unknown` (yet to be
// evaluated), `triggered`, or `ok`.
type AlertState string

State of the alert. Possible values are: `unknown` (yet to be evaluated), `triggered` (evaluated and fulfilled trigger conditions), or `ok` (evaluated and did not fulfill trigger conditions).

// Allowed AlertState values.
const AlertStateOk AlertState = `ok`
const AlertStateTriggered AlertState = `triggered`
const AlertStateUnknown AlertState = `unknown`

func (*AlertState) Set

func (f *AlertState) Set(v string) error

Set raw string value and validate it against allowed values

func (*AlertState) String

func (f *AlertState) String() string

String representation for fmt.Print

func (*AlertState) Type

func (f *AlertState) Type() string

Type always returns AlertState to satisfy [pflag.Value] interface

type AlertsAPI

// AlertsAPI provides CRUD operations on Databricks SQL alerts.
type AlertsAPI struct {
	// contains filtered or unexported fields
}

The alerts API can be used to perform CRUD operations on alerts. An alert is a Databricks SQL object that periodically runs a query, evaluates a condition of its result, and notifies one or more users and/or notification destinations if the condition was met. Alerts can be scheduled using the `sql_task` type of the Jobs API, e.g. :method:jobs/create.

func NewAlerts

func NewAlerts(client *client.DatabricksClient) *AlertsAPI

func (*AlertsAPI) AlertNameToIdMap

func (a *AlertsAPI) AlertNameToIdMap(ctx context.Context) (map[string]string, error)

AlertNameToIdMap calls AlertsAPI.List and creates a map of results with Alert.Name as key and Alert.Id as value.

Returns an error if there's more than one Alert with the same .Name.

Note: All Alert instances are loaded into memory before creating a map.

This method is generated by Databricks SDK Code Generator.

func (*AlertsAPI) Create

func (a *AlertsAPI) Create(ctx context.Context, request CreateAlert) (*Alert, error)

Create an alert.

Creates an alert. An alert is a Databricks SQL object that periodically runs a query, evaluates a condition of its result, and notifies users or notification destinations if the condition was met.

Example (Alerts)
// Example: create a throwaway query, attach an alert to it, then clean up.
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

// List data sources and use the first one for the new query.
srcs, err := w.DataSources.List(ctx)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", srcs)

query, err := w.Queries.Create(ctx, sql.QueryPostContent{
	Name:         fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	DataSourceId: srcs[0].Id,
	Description:  "test query from Go SDK",
	Query:        "SELECT 1",
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", query)

// Alert triggers when column "1" of the query result equals "1".
alert, err := w.Alerts.Create(ctx, sql.CreateAlert{
	Options: sql.AlertOptions{
		Column: "1",
		Op:     "==",
		Value:  "1",
	},
	Name:    fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	QueryId: query.Id,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", alert)

// cleanup
// NOTE(review): the query is deleted before the alert that references it —
// confirm the service permits this ordering.

err = w.Queries.DeleteByQueryId(ctx, query.Id)
if err != nil {
	panic(err)
}
err = w.Alerts.DeleteByAlertId(ctx, alert.Id)
if err != nil {
	panic(err)
}
Output:
Output:

func (*AlertsAPI) Delete

func (a *AlertsAPI) Delete(ctx context.Context, request DeleteAlertRequest) error

Delete an alert.

Deletes an alert. Deleted alerts are no longer accessible and cannot be restored. **Note:** Unlike queries and dashboards, alerts cannot be moved to the trash.

func (*AlertsAPI) DeleteByAlertId

func (a *AlertsAPI) DeleteByAlertId(ctx context.Context, alertId string) error

Delete an alert.

Deletes an alert. Deleted alerts are no longer accessible and cannot be restored. **Note:** Unlike queries and dashboards, alerts cannot be moved to the trash.

func (*AlertsAPI) Get

func (a *AlertsAPI) Get(ctx context.Context, request GetAlertRequest) (*Alert, error)

Get an alert.

Gets an alert.

Example (Alerts)
// Example: create a query and an alert, fetch the alert by ID, then clean up.
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

// List data sources and use the first one for the new query.
srcs, err := w.DataSources.List(ctx)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", srcs)

query, err := w.Queries.Create(ctx, sql.QueryPostContent{
	Name:         fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	DataSourceId: srcs[0].Id,
	Description:  "test query from Go SDK",
	Query:        "SELECT 1",
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", query)

alert, err := w.Alerts.Create(ctx, sql.CreateAlert{
	Options: sql.AlertOptions{
		Column: "1",
		Op:     "==",
		Value:  "1",
	},
	Name:    fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	QueryId: query.Id,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", alert)

// Round-trip: fetch the alert just created by its ID.
byId, err := w.Alerts.GetByAlertId(ctx, alert.Id)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", byId)

// cleanup

err = w.Queries.DeleteByQueryId(ctx, query.Id)
if err != nil {
	panic(err)
}
err = w.Alerts.DeleteByAlertId(ctx, alert.Id)
if err != nil {
	panic(err)
}
Output:
Output:

func (*AlertsAPI) GetByAlertId

func (a *AlertsAPI) GetByAlertId(ctx context.Context, alertId string) (*Alert, error)

Get an alert.

Gets an alert.

func (*AlertsAPI) GetByName

func (a *AlertsAPI) GetByName(ctx context.Context, name string) (*Alert, error)

GetByName calls AlertsAPI.AlertNameToIdMap and returns a single Alert.

Returns an error if there's more than one Alert with the same .Name.

Note: All Alert instances are loaded into memory before returning matching by name.

This method is generated by Databricks SDK Code Generator.

func (*AlertsAPI) Impl

func (a *AlertsAPI) Impl() AlertsService

Impl returns low-level Alerts API implementation

func (*AlertsAPI) List

func (a *AlertsAPI) List(ctx context.Context) ([]Alert, error)

Get alerts.

Gets a list of alerts.

func (*AlertsAPI) Update

func (a *AlertsAPI) Update(ctx context.Context, request EditAlert) error

Update an alert.

Updates an alert.

Example (Alerts)
// Example: create a query and an alert, update the alert, then clean up.
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

// List data sources and use the first one for the new query.
srcs, err := w.DataSources.List(ctx)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", srcs)

query, err := w.Queries.Create(ctx, sql.QueryPostContent{
	Name:         fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	DataSourceId: srcs[0].Id,
	Description:  "test query from Go SDK",
	Query:        "SELECT 1",
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", query)

alert, err := w.Alerts.Create(ctx, sql.CreateAlert{
	Options: sql.AlertOptions{
		Column: "1",
		Op:     "==",
		Value:  "1",
	},
	Name:    fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	QueryId: query.Id,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", alert)

// Update replaces the alert's name (a fresh timestamped one) while keeping
// the same options and query.
err = w.Alerts.Update(ctx, sql.EditAlert{
	Options: sql.AlertOptions{
		Column: "1",
		Op:     "==",
		Value:  "1",
	},
	AlertId: alert.Id,
	Name:    fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	QueryId: query.Id,
})
if err != nil {
	panic(err)
}

// cleanup

err = w.Queries.DeleteByQueryId(ctx, query.Id)
if err != nil {
	panic(err)
}
err = w.Alerts.DeleteByAlertId(ctx, alert.Id)
if err != nil {
	panic(err)
}
Output:
Output:

func (*AlertsAPI) WithImpl

func (a *AlertsAPI) WithImpl(impl AlertsService) *AlertsAPI

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks.

type AlertsService

// AlertsService is the low-level service interface behind AlertsAPI;
// it can be overridden for testing via AlertsAPI.WithImpl.
type AlertsService interface {

	// Create an alert.
	//
	// Creates an alert. An alert is a Databricks SQL object that periodically
	// runs a query, evaluates a condition of its result, and notifies users or
	// notification destinations if the condition was met.
	Create(ctx context.Context, request CreateAlert) (*Alert, error)

	// Delete an alert.
	//
	// Deletes an alert. Deleted alerts are no longer accessible and cannot be
	// restored. **Note:** Unlike queries and dashboards, alerts cannot be moved
	// to the trash.
	Delete(ctx context.Context, request DeleteAlertRequest) error

	// Get an alert.
	//
	// Gets an alert.
	Get(ctx context.Context, request GetAlertRequest) (*Alert, error)

	// Get alerts.
	//
	// Gets a list of alerts.
	List(ctx context.Context) ([]Alert, error)

	// Update an alert.
	//
	// Updates an alert.
	Update(ctx context.Context, request EditAlert) error
}

The alerts API can be used to perform CRUD operations on alerts. An alert is a Databricks SQL object that periodically runs a query, evaluates a condition of its result, and notifies one or more users and/or notification destinations if the condition was met. Alerts can be scheduled using the `sql_task` type of the Jobs API, e.g. :method:jobs/create.

type CancelExecutionRequest added in v0.3.0

// CancelExecutionRequest identifies a running statement execution to cancel.
type CancelExecutionRequest struct {
	// ID of the statement execution. Excluded from both the JSON body and the
	// query string ("-" tags); presumably sent as a URL path parameter.
	StatementId string `json:"-" url:"-"`
}

Cancel statement execution

type Channel

// Channel selects a DBSQL release channel for a SQL warehouse.
type Channel struct {
	// DBSQL version the channel is mapped to, if any.
	DbsqlVersion string `json:"dbsql_version,omitempty"`

	// Name of the channel.
	Name ChannelName `json:"name,omitempty"`
}

type ChannelInfo

// ChannelInfo describes the SQL warehouse channel in effect at the time of
// query execution.
type ChannelInfo struct {
	// DBSQL Version the channel is mapped to
	DbsqlVersion string `json:"dbsql_version,omitempty"`
	// Name of the channel
	Name ChannelName `json:"name,omitempty"`
}

Channel information for the SQL warehouse at the time of query execution

type ChannelName

// ChannelName identifies a DBSQL release channel.
type ChannelName string

// Allowed ChannelName values.
const ChannelNameChannelNameCurrent ChannelName = `CHANNEL_NAME_CURRENT`
const ChannelNameChannelNameCustom ChannelName = `CHANNEL_NAME_CUSTOM`
const ChannelNameChannelNamePreview ChannelName = `CHANNEL_NAME_PREVIEW`
const ChannelNameChannelNamePrevious ChannelName = `CHANNEL_NAME_PREVIOUS`
const ChannelNameChannelNameUnspecified ChannelName = `CHANNEL_NAME_UNSPECIFIED`

func (*ChannelName) Set

func (f *ChannelName) Set(v string) error

Set raw string value and validate it against allowed values

func (*ChannelName) String

func (f *ChannelName) String() string

String representation for fmt.Print

func (*ChannelName) Type

func (f *ChannelName) Type() string

Type always returns ChannelName to satisfy [pflag.Value] interface

type ChunkInfo added in v0.3.0

// ChunkInfo describes metadata for one chunk of a result set; it appears both
// inside a manifest and when fetching individual chunk data or links.
type ChunkInfo struct {
	// Number of bytes in the result chunk.
	ByteCount int64 `json:"byte_count,omitempty"`
	// Position within the sequence of result set chunks.
	ChunkIndex int `json:"chunk_index,omitempty"`
	// When fetching, gives `chunk_index` for the _next_ chunk; if absent,
	// indicates there are no more chunks.
	NextChunkIndex int `json:"next_chunk_index,omitempty"`
	// When fetching, gives `internal_link` for the _next_ chunk; if absent,
	// indicates there are no more chunks.
	NextChunkInternalLink string `json:"next_chunk_internal_link,omitempty"`
	// Number of rows within the result chunk.
	RowCount int64 `json:"row_count,omitempty"`
	// Starting row offset within the result set.
	RowOffset int64 `json:"row_offset,omitempty"`
}

Describes metadata for a particular chunk, within a result set; this structure is used both within a manifest, and when fetching individual chunk data or links.

type ColumnInfo added in v0.3.0

// ColumnInfo describes a single column of a statement result set.
type ColumnInfo struct {
	// Name of Column.
	Name string `json:"name,omitempty"`
	// Ordinal position of column (starting at position 0).
	Position int `json:"position,omitempty"`
	// Format of interval type.
	TypeIntervalType string `json:"type_interval_type,omitempty"`
	// Name of type (INT, STRUCT, MAP, and so on)
	TypeName ColumnInfoTypeName `json:"type_name,omitempty"`
	// Digits of precision.
	TypePrecision int `json:"type_precision,omitempty"`
	// Digits to right of decimal.
	TypeScale int `json:"type_scale,omitempty"`
	// Full data type spec, SQL/catalogString text.
	TypeText string `json:"type_text,omitempty"`
}

type ColumnInfoTypeName added in v0.3.0

// ColumnInfoTypeName is the name of a column's data type (INT, STRUCT, MAP,
// and so on).
type ColumnInfoTypeName string

Name of type (INT, STRUCT, MAP, and so on)

// Allowed ColumnInfoTypeName values.
const ColumnInfoTypeNameArray ColumnInfoTypeName = `ARRAY`
const ColumnInfoTypeNameBinary ColumnInfoTypeName = `BINARY`
const ColumnInfoTypeNameBoolean ColumnInfoTypeName = `BOOLEAN`
const ColumnInfoTypeNameByte ColumnInfoTypeName = `BYTE`
const ColumnInfoTypeNameChar ColumnInfoTypeName = `CHAR`
const ColumnInfoTypeNameDate ColumnInfoTypeName = `DATE`
const ColumnInfoTypeNameDecimal ColumnInfoTypeName = `DECIMAL`
const ColumnInfoTypeNameDouble ColumnInfoTypeName = `DOUBLE`
const ColumnInfoTypeNameFloat ColumnInfoTypeName = `FLOAT`
const ColumnInfoTypeNameInt ColumnInfoTypeName = `INT`
const ColumnInfoTypeNameInterval ColumnInfoTypeName = `INTERVAL`
const ColumnInfoTypeNameLong ColumnInfoTypeName = `LONG`
const ColumnInfoTypeNameMap ColumnInfoTypeName = `MAP`
const ColumnInfoTypeNameNull ColumnInfoTypeName = `NULL`
const ColumnInfoTypeNameShort ColumnInfoTypeName = `SHORT`
const ColumnInfoTypeNameString ColumnInfoTypeName = `STRING`
const ColumnInfoTypeNameStruct ColumnInfoTypeName = `STRUCT`
const ColumnInfoTypeNameTimestamp ColumnInfoTypeName = `TIMESTAMP`
const ColumnInfoTypeNameUserDefinedType ColumnInfoTypeName = `USER_DEFINED_TYPE`

func (*ColumnInfoTypeName) Set added in v0.3.0

func (f *ColumnInfoTypeName) Set(v string) error

Set raw string value and validate it against allowed values

func (*ColumnInfoTypeName) String added in v0.3.0

func (f *ColumnInfoTypeName) String() string

String representation for fmt.Print

func (*ColumnInfoTypeName) Type added in v0.3.0

func (f *ColumnInfoTypeName) Type() string

Type always returns ColumnInfoTypeName to satisfy [pflag.Value] interface

type CreateAlert added in v0.3.0

// CreateAlert is the request payload for AlertsAPI.Create.
type CreateAlert struct {
	// Name of the alert.
	Name string `json:"name"`
	// Alert configuration options.
	Options AlertOptions `json:"options"`
	// The identifier of the workspace folder containing the object.
	Parent string `json:"parent,omitempty"`
	// Query ID.
	QueryId string `json:"query_id"`
	// Number of seconds after being triggered before the alert rearms itself
	// and can be triggered again. If `null`, alert will never be triggered
	// again.
	Rearm int `json:"rearm,omitempty"`
}

type CreateDashboardRequest

// CreateDashboardRequest is the request payload for creating a dashboard
// object.
type CreateDashboardRequest struct {
	// Indicates whether this query object should appear in the current user's
	// favorites list. The application uses this flag to determine whether or
	// not the "favorite star" should be selected.
	IsFavorite bool `json:"is_favorite,omitempty"`
	// The title of this dashboard that appears in list views and at the top of
	// the dashboard page.
	Name string `json:"name,omitempty"`
	// The identifier of the workspace folder containing the object.
	Parent string `json:"parent,omitempty"`

	// Tags attached to the dashboard.
	Tags []string `json:"tags,omitempty"`
}

Create a dashboard object

type CreateWarehouseRequest

// CreateWarehouseRequest is the request payload for creating a SQL warehouse.
type CreateWarehouseRequest struct {
	// The amount of time in minutes that a SQL warehouse must be idle (i.e., no
	// RUNNING queries) before it is automatically stopped.
	//
	// Supported values: - Must be == 0 or >= 10 mins - 0 indicates no autostop.
	//
	// Defaults to 120 mins
	AutoStopMins int `json:"auto_stop_mins,omitempty"`
	// Channel Details
	Channel *Channel `json:"channel,omitempty"`
	// Size of the clusters allocated for this warehouse. Increasing the size of
	// a spark cluster allows you to run larger queries on it. If you want to
	// increase the number of concurrent queries, please tune max_num_clusters.
	//
	// Supported values: - 2X-Small - X-Small - Small - Medium - Large - X-Large
	// - 2X-Large - 3X-Large - 4X-Large
	ClusterSize string `json:"cluster_size,omitempty"`
	// warehouse creator name
	CreatorName string `json:"creator_name,omitempty"`
	// Configures whether the warehouse should use Photon optimized clusters.
	//
	// Defaults to false.
	EnablePhoton bool `json:"enable_photon,omitempty"`
	// Configures whether the warehouse should use serverless compute
	EnableServerlessCompute bool `json:"enable_serverless_compute,omitempty"`
	// Deprecated. Instance profile used to pass IAM role to the cluster
	InstanceProfileArn string `json:"instance_profile_arn,omitempty"`
	// Maximum number of clusters that the autoscaler will create to handle
	// concurrent queries.
	//
	// Supported values: - Must be >= min_num_clusters - Must be <= 30.
	//
	// Defaults to min_clusters if unset.
	MaxNumClusters int `json:"max_num_clusters,omitempty"`
	// Minimum number of available clusters that will be maintained for this SQL
	// warehouse. Increasing this will ensure that a larger number of clusters
	// are always running and therefore may reduce the cold start time for new
	// queries. This is similar to reserved vs. revocable cores in a resource
	// manager.
	//
	// Supported values: - Must be > 0 - Must be <= min(max_num_clusters, 30)
	//
	// Defaults to 1
	MinNumClusters int `json:"min_num_clusters,omitempty"`
	// Logical name for the cluster.
	//
	// Supported values: - Must be unique within an org. - Must be less than 100
	// characters.
	Name string `json:"name,omitempty"`
	// Configurations whether the warehouse should use spot instances.
	SpotInstancePolicy SpotInstancePolicy `json:"spot_instance_policy,omitempty"`
	// A set of key-value pairs that will be tagged on all resources (e.g., AWS
	// instances and EBS volumes) associated with this SQL warehouse.
	//
	// Supported values: - Number of tags < 45.
	Tags *EndpointTags `json:"tags,omitempty"`
	// Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless
	// compute, you must set to `PRO` and also set the field
	// `enable_serverless_compute` to `true`.
	WarehouseType CreateWarehouseRequestWarehouseType `json:"warehouse_type,omitempty"`
}

type CreateWarehouseRequestWarehouseType added in v0.9.0

// CreateWarehouseRequestWarehouseType is the warehouse type: `PRO` or
// `CLASSIC`. Serverless compute requires `PRO` plus
// `enable_serverless_compute` set to `true`.
type CreateWarehouseRequestWarehouseType string

Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to `PRO` and also set the field `enable_serverless_compute` to `true`.

// Allowed CreateWarehouseRequestWarehouseType values.
const CreateWarehouseRequestWarehouseTypeClassic CreateWarehouseRequestWarehouseType = `CLASSIC`
const CreateWarehouseRequestWarehouseTypePro CreateWarehouseRequestWarehouseType = `PRO`
const CreateWarehouseRequestWarehouseTypeTypeUnspecified CreateWarehouseRequestWarehouseType = `TYPE_UNSPECIFIED`

func (*CreateWarehouseRequestWarehouseType) Set added in v0.9.0

Set raw string value and validate it against allowed values

func (*CreateWarehouseRequestWarehouseType) String added in v0.9.0

String representation for fmt.Print

func (*CreateWarehouseRequestWarehouseType) Type added in v0.9.0

Type always returns CreateWarehouseRequestWarehouseType to satisfy [pflag.Value] interface

type CreateWarehouseResponse

// CreateWarehouseResponse is returned when a SQL warehouse is created.
type CreateWarehouseResponse struct {
	// Id for the SQL warehouse. This value is unique across all SQL warehouses.
	Id string `json:"id,omitempty"`
}

type Dashboard

// Dashboard is a JSON representation of a dashboard containing widgets of
// visualizations and text boxes.
type Dashboard struct {
	// Whether the authenticated user can edit the query definition.
	CanEdit bool `json:"can_edit,omitempty"`
	// Timestamp when this dashboard was created.
	CreatedAt string `json:"created_at,omitempty"`
	// In the web application, query filters that share a name are coupled to a
	// single selection box if this value is `true`.
	DashboardFiltersEnabled bool `json:"dashboard_filters_enabled,omitempty"`
	// The ID for this dashboard.
	Id string `json:"id,omitempty"`
	// Indicates whether a dashboard is trashed. Trashed dashboards won't appear
	// in list views. If this boolean is `true`, the `options` property for this
	// dashboard includes a `moved_to_trash_at` timestamp. Items in trash are
	// permanently deleted after 30 days.
	IsArchived bool `json:"is_archived,omitempty"`
	// Whether a dashboard is a draft. Draft dashboards only appear in list
	// views for their owners.
	IsDraft bool `json:"is_draft,omitempty"`
	// Indicates whether this query object appears in the current user's
	// favorites list. This flag determines whether the star icon for favorites
	// is selected.
	IsFavorite bool `json:"is_favorite,omitempty"`
	// The title of the dashboard that appears in list views and at the top of
	// the dashboard page.
	Name string `json:"name,omitempty"`

	// Additional dashboard options (e.g. trash timestamp).
	Options *DashboardOptions `json:"options,omitempty"`
	// The identifier of the workspace folder containing the object.
	Parent string `json:"parent,omitempty"`
	// Permission tier of the caller on this dashboard.
	PermissionTier PermissionLevel `json:"permission_tier,omitempty"`
	// URL slug. Usually mirrors the query name with dashes (`-`) instead of
	// spaces. Appears in the URL for this query.
	Slug string `json:"slug,omitempty"`

	// Tags attached to the dashboard.
	Tags []string `json:"tags,omitempty"`
	// Timestamp when this dashboard was last updated.
	UpdatedAt string `json:"updated_at,omitempty"`

	// User that created and owns this dashboard (see also UserId).
	User *User `json:"user,omitempty"`
	// The ID of the user that created and owns this dashboard.
	UserId int `json:"user_id,omitempty"`

	// Widgets (visualizations and text boxes) placed on the dashboard.
	Widgets []Widget `json:"widgets,omitempty"`
}

A JSON representing a dashboard containing widgets of visualizations and text boxes.

type DashboardOptions

// DashboardOptions carries additional dashboard properties.
type DashboardOptions struct {
	// The timestamp when this dashboard was moved to trash. Only present when
	// the `is_archived` property is `true`. Trashed items are deleted after
	// thirty days.
	MovedToTrashAt string `json:"moved_to_trash_at,omitempty"`
}

type DashboardsAPI

// DashboardsAPI provides access to Databricks SQL dashboard objects.
type DashboardsAPI struct {
	// contains filtered or unexported fields
}

In general, there is little need to modify dashboards using the API. However, it can be useful to use dashboard objects to look up a collection of related query IDs. The API can also be used to duplicate multiple dashboards at once since you can get a dashboard definition with a GET request and then POST it to create a new one. Dashboards can be scheduled using the `sql_task` type of the Jobs API, e.g. :method:jobs/create.

func NewDashboards

func NewDashboards(client *client.DatabricksClient) *DashboardsAPI

func (*DashboardsAPI) Create

func (a *DashboardsAPI) Create(ctx context.Context, request CreateDashboardRequest) (*Dashboard, error)

Create a dashboard object.

Example (Dashboards)
// Example: create a dashboard with a unique timestamped name, then clean up.
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

created, err := w.Dashboards.Create(ctx, sql.CreateDashboardRequest{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

// cleanup

err = w.Dashboards.DeleteByDashboardId(ctx, created.Id)
if err != nil {
	panic(err)
}
Output:
Output:

func (*DashboardsAPI) DashboardNameToIdMap

func (a *DashboardsAPI) DashboardNameToIdMap(ctx context.Context, request ListDashboardsRequest) (map[string]string, error)

DashboardNameToIdMap calls DashboardsAPI.ListAll and creates a map of results with Dashboard.Name as key and Dashboard.Id as value.

Returns an error if there's more than one Dashboard with the same .Name.

Note: All Dashboard instances are loaded into memory before creating a map.

This method is generated by Databricks SDK Code Generator.

func (*DashboardsAPI) Delete

func (a *DashboardsAPI) Delete(ctx context.Context, request DeleteDashboardRequest) error

Remove a dashboard.

Moves a dashboard to the trash. Trashed dashboards do not appear in list views or searches, and cannot be shared.

Example (Dashboards)
// Example: create a dashboard, then move it to the trash.
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

created, err := w.Dashboards.Create(ctx, sql.CreateDashboardRequest{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

err = w.Dashboards.DeleteByDashboardId(ctx, created.Id)
if err != nil {
	panic(err)
}

// cleanup
// NOTE(review): this deletes the same dashboard a second time — confirm the
// service tolerates deleting an already-trashed dashboard, otherwise this
// example panics.

err = w.Dashboards.DeleteByDashboardId(ctx, created.Id)
if err != nil {
	panic(err)
}
Output:
Output:

func (*DashboardsAPI) DeleteByDashboardId

func (a *DashboardsAPI) DeleteByDashboardId(ctx context.Context, dashboardId string) error

Remove a dashboard.

Moves a dashboard to the trash. Trashed dashboards do not appear in list views or searches, and cannot be shared.

func (*DashboardsAPI) Get

Retrieve a definition.

Returns a JSON representation of a dashboard object, including its visualization and query objects.

Example (Dashboards)
// Example: create a dashboard, fetch it back by ID, then clean up.
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

created, err := w.Dashboards.Create(ctx, sql.CreateDashboardRequest{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

// Round-trip: fetch the dashboard just created by its ID.
byId, err := w.Dashboards.GetByDashboardId(ctx, created.Id)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", byId)

// cleanup

err = w.Dashboards.DeleteByDashboardId(ctx, created.Id)
if err != nil {
	panic(err)
}
Output:
Output:

func (*DashboardsAPI) GetByDashboardId

func (a *DashboardsAPI) GetByDashboardId(ctx context.Context, dashboardId string) (*Dashboard, error)

Retrieve a definition.

Returns a JSON representation of a dashboard object, including its visualization and query objects.

func (*DashboardsAPI) GetByName

func (a *DashboardsAPI) GetByName(ctx context.Context, name string) (*Dashboard, error)

GetByName calls DashboardsAPI.DashboardNameToIdMap and returns a single Dashboard.

Returns an error if there's more than one Dashboard with the same .Name.

Note: All Dashboard instances are loaded into memory before returning matching by name.

This method is generated by Databricks SDK Code Generator.

func (*DashboardsAPI) Impl

func (a *DashboardsAPI) Impl() DashboardsService

Impl returns low-level Dashboards API implementation

func (*DashboardsAPI) ListAll

func (a *DashboardsAPI) ListAll(ctx context.Context, request ListDashboardsRequest) ([]Dashboard, error)

Get dashboard objects.

Fetch a paginated list of dashboard objects.

This method is generated by Databricks SDK Code Generator.

Example (Dashboards)
// Example: list every dashboard in the workspace (iterates all result pages).
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

all, err := w.Dashboards.ListAll(ctx, sql.ListDashboardsRequest{})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", all)
Output:
Output:

func (*DashboardsAPI) Restore

func (a *DashboardsAPI) Restore(ctx context.Context, request RestoreDashboardRequest) error

Restore a dashboard.

A restored dashboard appears in list views and searches and can be shared.

Example (Dashboards)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

created, err := w.Dashboards.Create(ctx, sql.CreateDashboardRequest{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

err = w.Dashboards.Restore(ctx, sql.RestoreDashboardRequest{
	DashboardId: created.Id,
})
if err != nil {
	panic(err)
}

// cleanup

err = w.Dashboards.DeleteByDashboardId(ctx, created.Id)
if err != nil {
	panic(err)
}
Output:

func (*DashboardsAPI) WithImpl

func (a *DashboardsAPI) WithImpl(impl DashboardsService) *DashboardsAPI

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks.

type DashboardsService

// DashboardsService is the low-level service interface behind DashboardsAPI;
// it can be overridden for testing via DashboardsAPI.WithImpl.
type DashboardsService interface {

	// Create a dashboard object.
	Create(ctx context.Context, request CreateDashboardRequest) (*Dashboard, error)

	// Remove a dashboard.
	//
	// Moves a dashboard to the trash. Trashed dashboards do not appear in list
	// views or searches, and cannot be shared.
	Delete(ctx context.Context, request DeleteDashboardRequest) error

	// Retrieve a definition.
	//
	// Returns a JSON representation of a dashboard object, including its
	// visualization and query objects.
	Get(ctx context.Context, request GetDashboardRequest) (*Dashboard, error)

	// Get dashboard objects.
	//
	// Fetch a paginated list of dashboard objects.
	//
	// Use ListAll() to get all Dashboard instances, which will iterate over every result page.
	List(ctx context.Context, request ListDashboardsRequest) (*ListResponse, error)

	// Restore a dashboard.
	//
	// A restored dashboard appears in list views and searches and can be
	// shared.
	Restore(ctx context.Context, request RestoreDashboardRequest) error
}

In general, there is little need to modify dashboards using the API. However, it can be useful to use dashboard objects to look up a collection of related query IDs. The API can also be used to duplicate multiple dashboards at once since you can get a dashboard definition with a GET request and then POST it to create a new one. Dashboards can be scheduled using the `sql_task` type of the Jobs API, e.g. :method:jobs/create.

type DataSource

// DataSource is a JSON object representing a DBSQL data source / SQL
// warehouse.
type DataSource struct {
	// Data source ID.
	Id string `json:"id,omitempty"`
	// The string name of this data source / SQL warehouse as it appears in the
	// Databricks SQL web application.
	Name string `json:"name,omitempty"`
	// Reserved for internal use.
	PauseReason string `json:"pause_reason,omitempty"`
	// Reserved for internal use.
	Paused int `json:"paused,omitempty"`
	// Reserved for internal use.
	SupportsAutoLimit bool `json:"supports_auto_limit,omitempty"`
	// Reserved for internal use.
	Syntax string `json:"syntax,omitempty"`
	// The type of data source. For SQL warehouses, this will be
	// `databricks_internal`.
	Type string `json:"type,omitempty"`
	// Reserved for internal use.
	ViewOnly bool `json:"view_only,omitempty"`
	// The ID of the associated SQL warehouse, if this data source is backed by
	// a SQL warehouse.
	WarehouseId string `json:"warehouse_id,omitempty"`
}

A JSON object representing a DBSQL data source / SQL warehouse.

type DataSourcesAPI

type DataSourcesAPI struct {
	// contains filtered or unexported fields
}

This API is provided to assist you in making new query objects. When creating a query object, you may optionally specify a `data_source_id` for the SQL warehouse against which it will run. If you don't already know the `data_source_id` for your desired SQL warehouse, this API will help you find it.

This API does not support searches. It returns the full list of SQL warehouses in your workspace. We advise you to use any text editor, REST client, or `grep` to search the response from this API for the name of your SQL warehouse as it appears in Databricks SQL.

func NewDataSources

func NewDataSources(client *client.DatabricksClient) *DataSourcesAPI

func (*DataSourcesAPI) DataSourceNameToIdMap

func (a *DataSourcesAPI) DataSourceNameToIdMap(ctx context.Context) (map[string]string, error)

DataSourceNameToIdMap calls DataSourcesAPI.List and creates a map of results with DataSource.Name as key and DataSource.Id as value.

Returns an error if there's more than one DataSource with the same .Name.

Note: All DataSource instances are loaded into memory before creating a map.

This method is generated by Databricks SDK Code Generator.

func (*DataSourcesAPI) GetByName

func (a *DataSourcesAPI) GetByName(ctx context.Context, name string) (*DataSource, error)

GetByName calls DataSourcesAPI.DataSourceNameToIdMap and returns a single DataSource.

Returns an error if there's more than one DataSource with the same .Name.

Note: All DataSource instances are loaded into memory before returning matching by name.

This method is generated by Databricks SDK Code Generator.

func (*DataSourcesAPI) Impl

Impl returns low-level DataSources API implementation

func (*DataSourcesAPI) List

func (a *DataSourcesAPI) List(ctx context.Context) ([]DataSource, error)

Get a list of SQL warehouses.

Retrieves a full list of SQL warehouses available in this workspace. All fields that appear in this API response are enumerated for clarity. However, you need only a SQL warehouse's `id` to create new queries against it.

func (*DataSourcesAPI) WithImpl

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks.

type DataSourcesService

// DataSourcesService is the low-level contract behind the Data Sources API;
// it can be overridden via DataSourcesAPI.WithImpl for unit testing.
type DataSourcesService interface {

	// Get a list of SQL warehouses.
	//
	// Retrieves a full list of SQL warehouses available in this workspace. All
	// fields that appear in this API response are enumerated for clarity.
	// However, you need only a SQL warehouse's `id` to create new queries
	// against it.
	List(ctx context.Context) ([]DataSource, error)
}

This API is provided to assist you in making new query objects. When creating a query object, you may optionally specify a `data_source_id` for the SQL warehouse against which it will run. If you don't already know the `data_source_id` for your desired SQL warehouse, this API will help you find it.

This API does not support searches. It returns the full list of SQL warehouses in your workspace. We advise you to use any text editor, REST client, or `grep` to search the response from this API for the name of your SQL warehouse as it appears in Databricks SQL.

type DbsqlPermissionsAPI

type DbsqlPermissionsAPI struct {
	// contains filtered or unexported fields
}

The SQL Permissions API is similar to the endpoints of the :method:permissions/set. However, this exposes only one endpoint, which gets the Access Control List for a given object. You cannot modify any permissions using this API.

There are three levels of permission:

- `CAN_VIEW`: Allows read-only access

- `CAN_RUN`: Allows read access and run access (superset of `CAN_VIEW`)

- `CAN_MANAGE`: Allows all actions: read, run, edit, delete, modify permissions (superset of `CAN_RUN`)

func NewDbsqlPermissions

func NewDbsqlPermissions(client *client.DatabricksClient) *DbsqlPermissionsAPI

func (*DbsqlPermissionsAPI) Get

Get object ACL.

Gets a JSON representation of the access control list (ACL) for a specified object.

func (*DbsqlPermissionsAPI) GetByObjectTypeAndObjectId

func (a *DbsqlPermissionsAPI) GetByObjectTypeAndObjectId(ctx context.Context, objectType ObjectTypePlural, objectId string) (*GetResponse, error)

Get object ACL.

Gets a JSON representation of the access control list (ACL) for a specified object.

func (*DbsqlPermissionsAPI) Impl

Impl returns low-level DbsqlPermissions API implementation

func (*DbsqlPermissionsAPI) Set

Set object ACL.

Sets the access control list (ACL) for a specified object. This operation completely rewrites the ACL.

func (*DbsqlPermissionsAPI) TransferOwnership

func (a *DbsqlPermissionsAPI) TransferOwnership(ctx context.Context, request TransferOwnershipRequest) (*Success, error)

Transfer object ownership.

Transfers ownership of a dashboard, query, or alert to an active user. Requires an admin API key.

func (*DbsqlPermissionsAPI) WithImpl

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks.

type DbsqlPermissionsService

// DbsqlPermissionsService is the low-level contract behind the DBSQL
// Permissions API; it can be overridden via DbsqlPermissionsAPI.WithImpl for
// unit testing.
type DbsqlPermissionsService interface {

	// Get object ACL.
	//
	// Gets a JSON representation of the access control list (ACL) for a
	// specified object.
	Get(ctx context.Context, request GetDbsqlPermissionRequest) (*GetResponse, error)

	// Set object ACL.
	//
	// Sets the access control list (ACL) for a specified object. This
	// operation completely rewrites the ACL.
	Set(ctx context.Context, request SetRequest) (*SetResponse, error)

	// Transfer object ownership.
	//
	// Transfers ownership of a dashboard, query, or alert to an active user.
	// Requires an admin API key.
	TransferOwnership(ctx context.Context, request TransferOwnershipRequest) (*Success, error)
}

The SQL Permissions API is similar to the endpoints of the :method:permissions/set. However, this exposes only one endpoint, which gets the Access Control List for a given object. You cannot modify any permissions using this API.

There are three levels of permission:

- `CAN_VIEW`: Allows read-only access

- `CAN_RUN`: Allows read access and run access (superset of `CAN_VIEW`)

- `CAN_MANAGE`: Allows all actions: read, run, edit, delete, modify permissions (superset of `CAN_RUN`)

type DeleteAlertRequest

// DeleteAlertRequest identifies the alert to delete.
type DeleteAlertRequest struct {
	// Alert ID.
	AlertId string `json:"-" url:"-"`
}

Delete an alert

type DeleteDashboardRequest

// DeleteDashboardRequest identifies the dashboard to remove.
type DeleteDashboardRequest struct {
	// Dashboard ID.
	DashboardId string `json:"-" url:"-"`
}

Remove a dashboard

type DeleteQueryRequest

// DeleteQueryRequest identifies the query to delete.
type DeleteQueryRequest struct {
	// Query ID.
	QueryId string `json:"-" url:"-"`
}

Delete a query

type DeleteWarehouseRequest

// DeleteWarehouseRequest identifies the SQL warehouse to delete.
type DeleteWarehouseRequest struct {
	// Required. Id of the SQL warehouse.
	Id string `json:"-" url:"-"`
}

Delete a warehouse

type Disposition added in v0.3.0

// Disposition selects how statement results are fetched: `INLINE` (result
// data returned in the response, limited to 16 MiB) or `EXTERNAL_LINKS`
// (presigned cloud-storage URLs, up to 100 GiB).
type Disposition string

The fetch disposition provides two modes of fetching results: `INLINE` and `EXTERNAL_LINKS`.

Statements executed with `INLINE` disposition will return result data inline, in `JSON_ARRAY` format, in a series of chunks. If a given statement produces a result set with a size larger than 16 MiB, that statement execution is aborted, and no result set will be available.

**NOTE** Byte limits are computed based upon internal representations of the result set data, and may not match the sizes visible in JSON responses.

Statements executed with `EXTERNAL_LINKS` disposition will return result data as external links: URLs that point to cloud storage internal to the workspace. Using `EXTERNAL_LINKS` disposition allows statements to generate arbitrarily sized result sets for fetching up to 100 GiB. The resulting links have two important properties:

1. They point to resources _external_ to the Databricks compute; therefore any associated authentication information (typically a personal access token, OAuth token, or similar) _must be removed_ when fetching from these links.

2. These are presigned URLs with a specific expiration, indicated in the response. The behavior when attempting to use an expired link is cloud specific.

const DispositionExternalLinks Disposition = `EXTERNAL_LINKS`
const DispositionInline Disposition = `INLINE`

func (*Disposition) Set added in v0.3.0

func (f *Disposition) Set(v string) error

Set raw string value and validate it against allowed values

func (*Disposition) String added in v0.3.0

func (f *Disposition) String() string

String representation for fmt.Print

func (*Disposition) Type added in v0.3.0

func (f *Disposition) Type() string

Type always returns Disposition to satisfy [pflag.Value] interface

type EditAlert

// EditAlert is the request body for updating an existing alert.
type EditAlert struct {
	// Alert ID.
	AlertId string `json:"-" url:"-"`
	// Name of the alert.
	Name string `json:"name"`
	// Alert configuration options.
	Options AlertOptions `json:"options"`
	// Query ID.
	QueryId string `json:"query_id"`
	// Number of seconds after being triggered before the alert rearms itself
	// and can be triggered again. If `null`, alert will never be triggered
	// again.
	Rearm int `json:"rearm,omitempty"`
}

type EditWarehouseRequest

// EditWarehouseRequest describes the new configuration for an existing SQL
// warehouse.
type EditWarehouseRequest struct {
	// The amount of time in minutes that a SQL warehouse must be idle (i.e., no
	// RUNNING queries) before it is automatically stopped.
	//
	// Supported values: - Must be == 0 or >= 10 mins - 0 indicates no autostop.
	//
	// Defaults to 120 mins
	AutoStopMins int `json:"auto_stop_mins,omitempty"`
	// Channel Details
	Channel *Channel `json:"channel,omitempty"`
	// Size of the clusters allocated for this warehouse. Increasing the size of
	// a spark cluster allows you to run larger queries on it. If you want to
	// increase the number of concurrent queries, please tune max_num_clusters.
	//
	// Supported values: - 2X-Small - X-Small - Small - Medium - Large - X-Large
	// - 2X-Large - 3X-Large - 4X-Large
	ClusterSize string `json:"cluster_size,omitempty"`
	// warehouse creator name
	CreatorName string `json:"creator_name,omitempty"`
	// Configures whether the warehouse should use Photon optimized clusters.
	//
	// Defaults to false.
	EnablePhoton bool `json:"enable_photon,omitempty"`
	// Configures whether the warehouse should use serverless compute.
	EnableServerlessCompute bool `json:"enable_serverless_compute,omitempty"`
	// Required. Id of the warehouse to configure.
	Id string `json:"-" url:"-"`
	// Deprecated. Instance profile used to pass IAM role to the cluster
	InstanceProfileArn string `json:"instance_profile_arn,omitempty"`
	// Maximum number of clusters that the autoscaler will create to handle
	// concurrent queries.
	//
	// Supported values: - Must be >= min_num_clusters - Must be <= 30.
	//
	// Defaults to min_clusters if unset.
	MaxNumClusters int `json:"max_num_clusters,omitempty"`
	// Minimum number of available clusters that will be maintained for this SQL
	// warehouse. Increasing this will ensure that a larger number of clusters
	// are always running and therefore may reduce the cold start time for new
	// queries. This is similar to reserved vs. revocable cores in a resource
	// manager.
	//
	// Supported values: - Must be > 0 - Must be <= min(max_num_clusters, 30)
	//
	// Defaults to 1
	MinNumClusters int `json:"min_num_clusters,omitempty"`
	// Logical name for the cluster.
	//
	// Supported values: - Must be unique within an org. - Must be less than 100
	// characters.
	Name string `json:"name,omitempty"`
	// Configures whether the warehouse should use spot instances.
	SpotInstancePolicy SpotInstancePolicy `json:"spot_instance_policy,omitempty"`
	// A set of key-value pairs that will be tagged on all resources (e.g., AWS
	// instances and EBS volumes) associated with this SQL warehouse.
	//
	// Supported values: - Number of tags < 45.
	Tags *EndpointTags `json:"tags,omitempty"`
	// Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless
	// compute, you must set to `PRO` and also set the field
	// `enable_serverless_compute` to `true`.
	WarehouseType EditWarehouseRequestWarehouseType `json:"warehouse_type,omitempty"`
}

type EditWarehouseRequestWarehouseType added in v0.9.0

type EditWarehouseRequestWarehouseType string

Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to `PRO` and also set the field `enable_serverless_compute` to `true`.

const EditWarehouseRequestWarehouseTypeClassic EditWarehouseRequestWarehouseType = `CLASSIC`
const EditWarehouseRequestWarehouseTypePro EditWarehouseRequestWarehouseType = `PRO`
const EditWarehouseRequestWarehouseTypeTypeUnspecified EditWarehouseRequestWarehouseType = `TYPE_UNSPECIFIED`

func (*EditWarehouseRequestWarehouseType) Set added in v0.9.0

Set raw string value and validate it against allowed values

func (*EditWarehouseRequestWarehouseType) String added in v0.9.0

String representation for fmt.Print

func (*EditWarehouseRequestWarehouseType) Type added in v0.9.0

Type always returns EditWarehouseRequestWarehouseType to satisfy [pflag.Value] interface

type EndpointConfPair

// EndpointConfPair is a single key/value configuration entry.
type EndpointConfPair struct {
	Key string `json:"key,omitempty"`

	Value string `json:"value,omitempty"`
}

type EndpointHealth

// EndpointHealth describes the health status of a SQL warehouse.
type EndpointHealth struct {
	// Details about errors that are causing current degraded/failed status.
	Details string `json:"details,omitempty"`
	// The reason for failure to bring up clusters for this warehouse. This is
	// available when status is 'FAILED' and sometimes when it is DEGRADED.
	FailureReason *TerminationReason `json:"failure_reason,omitempty"`
	// Deprecated. split into summary and details for security
	Message string `json:"message,omitempty"`
	// Health status of the warehouse.
	Status Status `json:"status,omitempty"`
	// A short summary of the health status in case of degraded/failed
	// warehouses.
	Summary string `json:"summary,omitempty"`
}

type EndpointInfo

// EndpointInfo describes the configuration and current runtime state of a
// SQL warehouse.
type EndpointInfo struct {
	// The amount of time in minutes that a SQL warehouse must be idle (i.e., no
	// RUNNING queries) before it is automatically stopped.
	//
	// Supported values: - Must be == 0 or >= 10 mins - 0 indicates no autostop.
	//
	// Defaults to 120 mins
	AutoStopMins int `json:"auto_stop_mins,omitempty"`
	// Channel Details
	Channel *Channel `json:"channel,omitempty"`
	// Size of the clusters allocated for this warehouse. Increasing the size of
	// a spark cluster allows you to run larger queries on it. If you want to
	// increase the number of concurrent queries, please tune max_num_clusters.
	//
	// Supported values: - 2X-Small - X-Small - Small - Medium - Large - X-Large
	// - 2X-Large - 3X-Large - 4X-Large
	ClusterSize string `json:"cluster_size,omitempty"`
	// warehouse creator name
	CreatorName string `json:"creator_name,omitempty"`
	// Configures whether the warehouse should use Photon optimized clusters.
	//
	// Defaults to false.
	EnablePhoton bool `json:"enable_photon,omitempty"`
	// Configures whether the warehouse should use serverless compute
	EnableServerlessCompute bool `json:"enable_serverless_compute,omitempty"`
	// Optional health status. Assume the warehouse is healthy if this field is
	// not set.
	Health *EndpointHealth `json:"health,omitempty"`
	// unique identifier for warehouse
	Id string `json:"id,omitempty"`
	// Deprecated. Instance profile used to pass IAM role to the cluster
	InstanceProfileArn string `json:"instance_profile_arn,omitempty"`
	// the jdbc connection string for this warehouse
	JdbcUrl string `json:"jdbc_url,omitempty"`
	// Maximum number of clusters that the autoscaler will create to handle
	// concurrent queries.
	//
	// Supported values: - Must be >= min_num_clusters - Must be <= 30.
	//
	// Defaults to min_clusters if unset.
	MaxNumClusters int `json:"max_num_clusters,omitempty"`
	// Minimum number of available clusters that will be maintained for this SQL
	// warehouse. Increasing this will ensure that a larger number of clusters
	// are always running and therefore may reduce the cold start time for new
	// queries. This is similar to reserved vs. revocable cores in a resource
	// manager.
	//
	// Supported values: - Must be > 0 - Must be <= min(max_num_clusters, 30)
	//
	// Defaults to 1
	MinNumClusters int `json:"min_num_clusters,omitempty"`
	// Logical name for the cluster.
	//
	// Supported values: - Must be unique within an org. - Must be less than 100
	// characters.
	Name string `json:"name,omitempty"`
	// current number of active sessions for the warehouse
	NumActiveSessions int64 `json:"num_active_sessions,omitempty"`
	// current number of clusters running for the service
	NumClusters int `json:"num_clusters,omitempty"`
	// ODBC parameters for the SQL warehouse
	OdbcParams *OdbcParams `json:"odbc_params,omitempty"`
	// Configures whether the warehouse should use spot instances.
	SpotInstancePolicy SpotInstancePolicy `json:"spot_instance_policy,omitempty"`
	// State of the warehouse
	State State `json:"state,omitempty"`
	// A set of key-value pairs that will be tagged on all resources (e.g., AWS
	// instances and EBS volumes) associated with this SQL warehouse.
	//
	// Supported values: - Number of tags < 45.
	Tags *EndpointTags `json:"tags,omitempty"`
	// Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless
	// compute, you must set to `PRO` and also set the field
	// `enable_serverless_compute` to `true`.
	WarehouseType EndpointInfoWarehouseType `json:"warehouse_type,omitempty"`
}

type EndpointInfoWarehouseType added in v0.9.0

type EndpointInfoWarehouseType string

Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to `PRO` and also set the field `enable_serverless_compute` to `true`.

const EndpointInfoWarehouseTypeClassic EndpointInfoWarehouseType = `CLASSIC`
const EndpointInfoWarehouseTypePro EndpointInfoWarehouseType = `PRO`
const EndpointInfoWarehouseTypeTypeUnspecified EndpointInfoWarehouseType = `TYPE_UNSPECIFIED`

func (*EndpointInfoWarehouseType) Set added in v0.9.0

Set raw string value and validate it against allowed values

func (*EndpointInfoWarehouseType) String added in v0.9.0

func (f *EndpointInfoWarehouseType) String() string

String representation for fmt.Print

func (*EndpointInfoWarehouseType) Type added in v0.9.0

Type always returns EndpointInfoWarehouseType to satisfy [pflag.Value] interface

type EndpointTagPair

// EndpointTagPair is a single key/value tag.
type EndpointTagPair struct {
	Key string `json:"key,omitempty"`

	Value string `json:"value,omitempty"`
}

type EndpointTags

// EndpointTags is the set of key-value tags applied to all resources
// associated with a SQL warehouse.
type EndpointTags struct {
	CustomTags []EndpointTagPair `json:"custom_tags,omitempty"`
}

type ExecuteStatementRequest added in v0.3.0

// ExecuteStatementRequest is the payload for executing a SQL statement
// against a SQL warehouse.
type ExecuteStatementRequest struct {
	// Applies the given byte limit to the statement's result size. Byte counts
	// are based on internal representations and may not match measurable sizes
	// in the requested `format`.
	ByteLimit int64 `json:"byte_limit,omitempty"`
	// Sets default catalog for statement execution, similar to [`USE CATALOG`]
	// in SQL.
	//
	// [`USE CATALOG`]: https://docs.databricks.com/sql/language-manual/sql-ref-syntax-ddl-use-catalog.html
	Catalog string `json:"catalog,omitempty"`
	// The fetch disposition provides two modes of fetching results: `INLINE`
	// and `EXTERNAL_LINKS`.
	//
	// Statements executed with `INLINE` disposition will return result data
	// inline, in `JSON_ARRAY` format, in a series of chunks. If a given
	// statement produces a result set with a size larger than 16 MiB, that
	// statement execution is aborted, and no result set will be available.
	//
	// **NOTE** Byte limits are computed based upon internal representations of
	// the result set data, and may not match the sizes visible in JSON
	// responses.
	//
	// Statements executed with `EXTERNAL_LINKS` disposition will return result
	// data as external links: URLs that point to cloud storage internal to the
	// workspace. Using `EXTERNAL_LINKS` disposition allows statements to
	// generate arbitrarily sized result sets for fetching up to 100 GiB. The
	// resulting links have two important properties:
	//
	// 1. They point to resources _external_ to the Databricks compute;
	// therefore any associated authentication information (typically a personal
	// access token, OAuth token, or similar) _must be removed_ when fetching
	// from these links.
	//
	// 2. These are presigned URLs with a specific expiration, indicated in the
	// response. The behavior when attempting to use an expired link is cloud
	// specific.
	Disposition Disposition `json:"disposition,omitempty"`
	// Statement execution supports three result formats: `JSON_ARRAY`
	// (default), `ARROW_STREAM`, and `CSV`.
	//
	// When specifying `format=JSON_ARRAY`, result data will be formatted as an
	// array of arrays of values, where each value is either the *string
	// representation* of a value, or `null`. For example, the output of `SELECT
	// concat('id-', id) AS strCol, id AS intCol, null AS nullCol FROM range(3)`
	// would look like this:
	//
	// ``` [ [ "id-1", "1", null ], [ "id-2", "2", null ], [ "id-3", "3", null
	// ], ] ```
	//
	// `JSON_ARRAY` is supported with `INLINE` and `EXTERNAL_LINKS`
	// dispositions.
	//
	// `INLINE` `JSON_ARRAY` data can be found at the path
	// `StatementResponse.result.data_array`.
	//
	// For `EXTERNAL_LINKS` `JSON_ARRAY` results, each URL points to a file in
	// cloud storage that contains compact JSON with no indentation or extra
	// whitespace.
	//
	// When specifying `format=ARROW_STREAM`, each chunk in the result will be
	// formatted as Apache Arrow Stream. See the [Apache Arrow streaming
	// format].
	//
	// IMPORTANT: The format `ARROW_STREAM` is supported only with
	// `EXTERNAL_LINKS` disposition.
	//
	// When specifying `format=CSV`, each chunk in the result will be a CSV
	// according to the [RFC 4180] standard. All the column values will have
	// *string representation* similar to the `JSON_ARRAY` format, and `null`
	// values will be encoded as `null`. Only the first chunk in the result
	// would contain a header row with column names. For example, the output of
	// `SELECT concat('id-', id) AS strCol, id AS intCol, null as nullCol FROM
	// range(3)` would look like this:
	//
	// ``` strCol,intCol,nullCol id-1,1,null id-2,2,null id-3,3,null ```
	//
	// IMPORTANT: The format `CSV` is supported only with `EXTERNAL_LINKS`
	// disposition.
	//
	// [Apache Arrow streaming format]: https://arrow.apache.org/docs/format/Columnar.html#ipc-streaming-format
	// [RFC 4180]: https://www.rfc-editor.org/rfc/rfc4180
	Format Format `json:"format,omitempty"`
	// When in synchronous mode with `wait_timeout > 0s` it determines the
	// action taken when the timeout is reached:
	//
	// `CONTINUE` → the statement execution continues asynchronously and the
	// call returns a statement ID immediately.
	//
	// `CANCEL` → the statement execution is canceled and the call returns
	// immediately with a `CANCELED` state.
	OnWaitTimeout TimeoutAction `json:"on_wait_timeout,omitempty"`
	// Sets default schema for statement execution, similar to [`USE SCHEMA`] in
	// SQL.
	//
	// [`USE SCHEMA`]: https://docs.databricks.com/sql/language-manual/sql-ref-syntax-ddl-use-schema.html
	Schema string `json:"schema,omitempty"`
	// SQL statement to execute
	Statement string `json:"statement,omitempty"`
	// The time in seconds the API service will wait for the statement's result
	// set as `Ns`, where `N` can be set to 0 or to a value between 5 and 50.
	// When set to '0s' the statement will execute in asynchronous mode.
	WaitTimeout string `json:"wait_timeout,omitempty"`
	// Warehouse upon which to execute a statement. See also [What are SQL
	// warehouses?](/sql/admin/warehouse-type.html)
	WarehouseId string `json:"warehouse_id,omitempty"`
}

type ExecuteStatementResponse added in v0.3.0

// ExecuteStatementResponse is returned when a statement is submitted for
// execution.
type ExecuteStatementResponse struct {
	// The result manifest provides schema and metadata for the result set.
	Manifest *ResultManifest `json:"manifest,omitempty"`
	// Result data chunks are delivered in either the `chunk` field when using
	// `INLINE` disposition, or in the `external_link` field when using
	// `EXTERNAL_LINKS` disposition. Exactly one of these will be set.
	Result *ResultData `json:"result,omitempty"`
	// Statement ID is returned upon successfully submitting a SQL statement,
	// and is a required reference for all subsequent calls.
	StatementId string `json:"statement_id,omitempty"`
	// Status response includes execution state and if relevant, error
	// information.
	Status *StatementStatus `json:"status,omitempty"`
}
// ExternalLink describes one chunk of result data served through a presigned
// URL when using the `EXTERNAL_LINKS` disposition.
type ExternalLink struct {
	// Number of bytes in the result chunk.
	ByteCount int64 `json:"byte_count,omitempty"`
	// Position within the sequence of result set chunks.
	ChunkIndex int `json:"chunk_index,omitempty"`
	// Indicates date-time that the given external link will expire and become
	// invalid, after which point a new `external_link` must be requested.
	Expiration string `json:"expiration,omitempty"`
	// Pre-signed URL pointing to a chunk of result data, hosted by an external
	// service, with a short expiration time (< 1 hour).
	ExternalLink string `json:"external_link,omitempty"`
	// When fetching, gives `chunk_index` for the _next_ chunk; if absent,
	// indicates there are no more chunks.
	NextChunkIndex int `json:"next_chunk_index,omitempty"`
	// When fetching, gives `internal_link` for the _next_ chunk; if absent,
	// indicates there are no more chunks.
	NextChunkInternalLink string `json:"next_chunk_internal_link,omitempty"`
	// Number of rows within the result chunk.
	RowCount int64 `json:"row_count,omitempty"`
	// Starting row offset within the result set.
	RowOffset int64 `json:"row_offset,omitempty"`
}

type Format added in v0.3.0

// Format selects the result serialization for statement execution:
// `JSON_ARRAY` (default), `ARROW_STREAM`, or `CSV`.
type Format string

Statement execution supports three result formats: `JSON_ARRAY` (default), `ARROW_STREAM`, and `CSV`.

When specifying `format=JSON_ARRAY`, result data will be formatted as an array of arrays of values, where each value is either the *string representation* of a value, or `null`. For example, the output of `SELECT concat('id-', id) AS strCol, id AS intCol, null AS nullCol FROM range(3)` would look like this:

``` [ [ "id-1", "1", null ], [ "id-2", "2", null ], [ "id-3", "3", null ], ] ```

`JSON_ARRAY` is supported with `INLINE` and `EXTERNAL_LINKS` dispositions.

`INLINE` `JSON_ARRAY` data can be found at the path `StatementResponse.result.data_array`.

For `EXTERNAL_LINKS` `JSON_ARRAY` results, each URL points to a file in cloud storage that contains compact JSON with no indentation or extra whitespace.

When specifying `format=ARROW_STREAM`, each chunk in the result will be formatted as Apache Arrow Stream. See the Apache Arrow streaming format.

IMPORTANT: The format `ARROW_STREAM` is supported only with `EXTERNAL_LINKS` disposition.

When specifying `format=CSV`, each chunk in the result will be a CSV according to the RFC 4180 standard. All the column values will have *string representation* similar to the `JSON_ARRAY` format, and `null` values will be encoded as `null`. Only the first chunk in the result would contain a header row with column names. For example, the output of `SELECT concat('id-', id) AS strCol, id AS intCol, null as nullCol FROM range(3)` would look like this:

``` strCol,intCol,nullCol id-1,1,null id-2,2,null id-3,3,null ```

IMPORTANT: The format `CSV` is supported only with `EXTERNAL_LINKS` disposition.

const FormatArrowStream Format = `ARROW_STREAM`
const FormatCsv Format = `CSV`
const FormatJsonArray Format = `JSON_ARRAY`

func (*Format) Set added in v0.3.0

func (f *Format) Set(v string) error

Set raw string value and validate it against allowed values

func (*Format) String added in v0.3.0

func (f *Format) String() string

String representation for fmt.Print

func (*Format) Type added in v0.3.0

func (f *Format) Type() string

Type always returns Format to satisfy [pflag.Value] interface

type GetAlertRequest

// GetAlertRequest identifies the alert to fetch.
type GetAlertRequest struct {
	// Alert ID.
	AlertId string `json:"-" url:"-"`
}

Get an alert

type GetDashboardRequest

// GetDashboardRequest identifies the dashboard to fetch.
type GetDashboardRequest struct {
	// Dashboard ID.
	DashboardId string `json:"-" url:"-"`
}

Retrieve a definition

type GetDbsqlPermissionRequest

// GetDbsqlPermissionRequest identifies the object whose ACL is requested.
type GetDbsqlPermissionRequest struct {
	// Object ID. An ACL is returned for the object with this UUID.
	ObjectId string `json:"-" url:"-"`
	// The type of object permissions to check.
	ObjectType ObjectTypePlural `json:"-" url:"-"`
}

Get object ACL

type GetQueryRequest

// GetQueryRequest identifies the query definition to fetch.
type GetQueryRequest struct {
	// Query ID.
	QueryId string `json:"-" url:"-"`
}

Get a query definition.

type GetResponse

// GetResponse is the access control list (ACL) returned for an object.
type GetResponse struct {
	AccessControlList []AccessControl `json:"access_control_list,omitempty"`
	// An object's type and UUID, separated by a forward slash (/) character.
	ObjectId string `json:"object_id,omitempty"`
	// A singular noun object type.
	ObjectType ObjectType `json:"object_type,omitempty"`
}

type GetStatementRequest added in v0.3.0

// GetStatementRequest identifies the statement whose status, manifest, and
// first result chunk are requested.
type GetStatementRequest struct {
	// Statement ID.
	StatementId string `json:"-" url:"-"`
}

Get status, manifest, and result first chunk

type GetStatementResponse added in v0.3.0

// GetStatementResponse carries the status, result manifest, and first result
// chunk for a previously submitted statement.
type GetStatementResponse struct {
	// The result manifest provides schema and metadata for the result set.
	Manifest *ResultManifest `json:"manifest,omitempty"`
	// Result data chunks are delivered in either the `chunk` field when using
	// `INLINE` disposition, or in the `external_link` field when using
	// `EXTERNAL_LINKS` disposition. Exactly one of these will be set.
	Result *ResultData `json:"result,omitempty"`
	// Statement ID is returned upon successfully submitting a SQL statement,
	// and is a required reference for all subsequent calls.
	StatementId string `json:"statement_id,omitempty"`
	// Status response includes execution state and if relevant, error
	// information.
	Status *StatementStatus `json:"status,omitempty"`
}

type GetStatementResultChunkNRequest added in v0.3.0

// GetStatementResultChunkNRequest identifies a single result chunk by
// statement ID and chunk index.
type GetStatementResultChunkNRequest struct {
	// Position of the chunk within the sequence of result set chunks.
	ChunkIndex int `json:"-" url:"-"`

	// Statement ID.
	StatementId string `json:"-" url:"-"`
}

Get result chunk by index

type GetWarehouseRequest

// GetWarehouseRequest identifies the SQL warehouse to fetch.
type GetWarehouseRequest struct {
	// Required. Id of the SQL warehouse.
	Id string `json:"-" url:"-"`
}

Get warehouse info

type GetWarehouseResponse

type GetWarehouseResponse struct {
	// The amount of time in minutes that a SQL warehouse must be idle (i.e., no
	// RUNNING queries) before it is automatically stopped.
	//
	// Supported values: - Must be == 0 or >= 10 mins - 0 indicates no autostop.
	//
	// Defaults to 120 mins
	AutoStopMins int `json:"auto_stop_mins,omitempty"`
	// Channel Details
	Channel *Channel `json:"channel,omitempty"`
	// Size of the clusters allocated for this warehouse. Increasing the size of
	// a Spark cluster allows you to run larger queries on it. If you want to
	// increase the number of concurrent queries, please tune max_num_clusters.
	//
	// Supported values: - 2X-Small - X-Small - Small - Medium - Large - X-Large
	// - 2X-Large - 3X-Large - 4X-Large
	ClusterSize string `json:"cluster_size,omitempty"`
	// warehouse creator name
	CreatorName string `json:"creator_name,omitempty"`
	// Configures whether the warehouse should use Photon optimized clusters.
	//
	// Defaults to false.
	EnablePhoton bool `json:"enable_photon,omitempty"`
	// Configures whether the warehouse should use serverless compute
	EnableServerlessCompute bool `json:"enable_serverless_compute,omitempty"`
	// Optional health status. Assume the warehouse is healthy if this field is
	// not set.
	Health *EndpointHealth `json:"health,omitempty"`
	// unique identifier for warehouse
	Id string `json:"id,omitempty"`
	// Deprecated: Instance profile used to pass IAM role to the cluster.
	InstanceProfileArn string `json:"instance_profile_arn,omitempty"`
	// the jdbc connection string for this warehouse
	JdbcUrl string `json:"jdbc_url,omitempty"`
	// Maximum number of clusters that the autoscaler will create to handle
	// concurrent queries.
	//
	// Supported values: - Must be >= min_num_clusters - Must be <= 30.
	//
	// Defaults to min_num_clusters if unset.
	MaxNumClusters int `json:"max_num_clusters,omitempty"`
	// Minimum number of available clusters that will be maintained for this SQL
	// warehouse. Increasing this will ensure that a larger number of clusters
	// are always running and therefore may reduce the cold start time for new
	// queries. This is similar to reserved vs. revocable cores in a resource
	// manager.
	//
	// Supported values: - Must be > 0 - Must be <= min(max_num_clusters, 30)
	//
	// Defaults to 1
	MinNumClusters int `json:"min_num_clusters,omitempty"`
	// Logical name for the cluster.
	//
	// Supported values: - Must be unique within an org. - Must be less than 100
	// characters.
	Name string `json:"name,omitempty"`
	// current number of active sessions for the warehouse
	NumActiveSessions int64 `json:"num_active_sessions,omitempty"`
	// current number of clusters running for the service
	NumClusters int `json:"num_clusters,omitempty"`
	// ODBC parameters for the SQL warehouse
	OdbcParams *OdbcParams `json:"odbc_params,omitempty"`
	// Configures whether the warehouse should use spot instances.
	SpotInstancePolicy SpotInstancePolicy `json:"spot_instance_policy,omitempty"`
	// State of the warehouse
	State State `json:"state,omitempty"`
	// A set of key-value pairs that will be tagged on all resources (e.g., AWS
	// instances and EBS volumes) associated with this SQL warehouse.
	//
	// Supported values: - Number of tags < 45.
	Tags *EndpointTags `json:"tags,omitempty"`
	// Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless
	// compute, you must set to `PRO` and also set the field
	// `enable_serverless_compute` to `true`.
	WarehouseType GetWarehouseResponseWarehouseType `json:"warehouse_type,omitempty"`
}

type GetWarehouseResponseWarehouseType added in v0.9.0

type GetWarehouseResponseWarehouseType string

Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to `PRO` and also set the field `enable_serverless_compute` to `true`.

const GetWarehouseResponseWarehouseTypeClassic GetWarehouseResponseWarehouseType = `CLASSIC`
const GetWarehouseResponseWarehouseTypePro GetWarehouseResponseWarehouseType = `PRO`
const GetWarehouseResponseWarehouseTypeTypeUnspecified GetWarehouseResponseWarehouseType = `TYPE_UNSPECIFIED`

func (*GetWarehouseResponseWarehouseType) Set added in v0.9.0

Set raw string value and validate it against allowed values

func (*GetWarehouseResponseWarehouseType) String added in v0.9.0

String representation for fmt.Print

func (*GetWarehouseResponseWarehouseType) Type added in v0.9.0

Type always returns GetWarehouseResponseWarehouseType to satisfy [pflag.Value] interface

type GetWorkspaceWarehouseConfigResponse

type GetWorkspaceWarehouseConfigResponse struct {
	// Optional: Channel selection details
	Channel *Channel `json:"channel,omitempty"`
	// Deprecated: Use sql_configuration_parameters
	ConfigParam *RepeatedEndpointConfPairs `json:"config_param,omitempty"`
	// Spark confs for external hive metastore configuration. The JSON
	// serialized size must be less than or equal to 512K.
	DataAccessConfig []EndpointConfPair `json:"data_access_config,omitempty"`
	// List of Warehouse Types allowed in this workspace (limits allowed value
	// of the type field in CreateWarehouse and EditWarehouse). Note: Some types
	// cannot be disabled, they don't need to be specified in
	// SetWorkspaceWarehouseConfig. Note: Disabling a type may cause existing
	// warehouses to be converted to another type. Used by frontend to save
	// specific type availability in the warehouse create and edit form UI.
	EnabledWarehouseTypes []WarehouseTypePair `json:"enabled_warehouse_types,omitempty"`
	// Deprecated: Use sql_configuration_parameters
	GlobalParam *RepeatedEndpointConfPairs `json:"global_param,omitempty"`
	// GCP only: Google Service Account used to pass to cluster to access Google
	// Cloud Storage
	GoogleServiceAccount string `json:"google_service_account,omitempty"`
	// AWS Only: Instance profile used to pass IAM role to the cluster
	InstanceProfileArn string `json:"instance_profile_arn,omitempty"`
	// Security policy for warehouses
	SecurityPolicy GetWorkspaceWarehouseConfigResponseSecurityPolicy `json:"security_policy,omitempty"`
	// SQL configuration parameters
	SqlConfigurationParameters *RepeatedEndpointConfPairs `json:"sql_configuration_parameters,omitempty"`
}

type GetWorkspaceWarehouseConfigResponseSecurityPolicy

type GetWorkspaceWarehouseConfigResponseSecurityPolicy string

Security policy for warehouses

const GetWorkspaceWarehouseConfigResponseSecurityPolicyDataAccessControl GetWorkspaceWarehouseConfigResponseSecurityPolicy = `DATA_ACCESS_CONTROL`
const GetWorkspaceWarehouseConfigResponseSecurityPolicyNone GetWorkspaceWarehouseConfigResponseSecurityPolicy = `NONE`
const GetWorkspaceWarehouseConfigResponseSecurityPolicyPassthrough GetWorkspaceWarehouseConfigResponseSecurityPolicy = `PASSTHROUGH`

func (*GetWorkspaceWarehouseConfigResponseSecurityPolicy) Set

Set raw string value and validate it against allowed values

func (*GetWorkspaceWarehouseConfigResponseSecurityPolicy) String

String representation for fmt.Print

func (*GetWorkspaceWarehouseConfigResponseSecurityPolicy) Type

Type always returns GetWorkspaceWarehouseConfigResponseSecurityPolicy to satisfy [pflag.Value] interface

type ListDashboardsRequest

type ListDashboardsRequest struct {
	// Name of dashboard attribute to order by.
	Order ListOrder `json:"-" url:"order,omitempty"`
	// Page number to retrieve.
	Page int `json:"-" url:"page,omitempty"`
	// Number of dashboards to return per page.
	PageSize int `json:"-" url:"page_size,omitempty"`
	// Full text search term.
	Q string `json:"-" url:"q,omitempty"`
}

Get dashboard objects

type ListOrder

type ListOrder string
const ListOrderCreatedAt ListOrder = `created_at`
const ListOrderName ListOrder = `name`

func (*ListOrder) Set

func (f *ListOrder) Set(v string) error

Set raw string value and validate it against allowed values

func (*ListOrder) String

func (f *ListOrder) String() string

String representation for fmt.Print

func (*ListOrder) Type

func (f *ListOrder) Type() string

Type always returns ListOrder to satisfy [pflag.Value] interface

type ListQueriesRequest

type ListQueriesRequest struct {
	// Name of query attribute to order by. Default sort order is ascending.
	// Append a dash (`-`) to order descending instead.
	//
	// - `name`: The name of the query.
	//
	// - `created_at`: The timestamp the query was created.
	//
	// - `runtime`: The time it took to run this query. This is blank for
	// parameterized queries. A blank value is treated as the highest value for
	// sorting.
	//
	// - `executed_at`: The timestamp when the query was last run.
	//
	// - `created_by`: The user name of the user that created the query.
	Order string `json:"-" url:"order,omitempty"`
	// Page number to retrieve.
	Page int `json:"-" url:"page,omitempty"`
	// Number of queries to return per page.
	PageSize int `json:"-" url:"page_size,omitempty"`
	// Full text search term
	Q string `json:"-" url:"q,omitempty"`
}

Get a list of queries

type ListQueriesResponse

type ListQueriesResponse struct {
	// Whether there is another page of results.
	HasNextPage bool `json:"has_next_page,omitempty"`
	// A token that can be used to get the next page of results.
	NextPageToken string `json:"next_page_token,omitempty"`

	Res []QueryInfo `json:"res,omitempty"`
}

type ListQueryHistoryRequest

type ListQueryHistoryRequest struct {
	// A filter to limit query history results. This field is optional.
	FilterBy *QueryFilter `json:"-" url:"filter_by,omitempty"`
	// Whether to include metrics about the query.
	IncludeMetrics bool `json:"-" url:"include_metrics,omitempty"`
	// Limit the number of results returned in one page. The default is 100.
	MaxResults int `json:"-" url:"max_results,omitempty"`
	// A token that can be used to get the next page of results.
	PageToken string `json:"-" url:"page_token,omitempty"`
}

List Queries

type ListResponse

type ListResponse struct {
	// The total number of dashboards.
	Count int `json:"count,omitempty"`
	// The current page being displayed.
	Page int `json:"page,omitempty"`
	// The number of dashboards per page.
	PageSize int `json:"page_size,omitempty"`
	// List of dashboards returned.
	Results []Dashboard `json:"results,omitempty"`
}

type ListWarehousesRequest

type ListWarehousesRequest struct {
	// Service Principal which will be used to fetch the list of warehouses. If
	// not specified, the user from the session header is used.
	RunAsUserId int `json:"-" url:"run_as_user_id,omitempty"`
}

List warehouses

type ListWarehousesResponse

type ListWarehousesResponse struct {
	// A list of warehouses and their configurations.
	Warehouses []EndpointInfo `json:"warehouses,omitempty"`
}

type ObjectType

type ObjectType string

A singular noun object type.

const ObjectTypeAlert ObjectType = `alert`
const ObjectTypeDashboard ObjectType = `dashboard`
const ObjectTypeDataSource ObjectType = `data_source`
const ObjectTypeQuery ObjectType = `query`

func (*ObjectType) Set

func (f *ObjectType) Set(v string) error

Set raw string value and validate it against allowed values

func (*ObjectType) String

func (f *ObjectType) String() string

String representation for fmt.Print

func (*ObjectType) Type

func (f *ObjectType) Type() string

Type always returns ObjectType to satisfy [pflag.Value] interface

type ObjectTypePlural

type ObjectTypePlural string

Always a plural of the object type.

const ObjectTypePluralAlerts ObjectTypePlural = `alerts`
const ObjectTypePluralDashboards ObjectTypePlural = `dashboards`
const ObjectTypePluralDataSources ObjectTypePlural = `data_sources`
const ObjectTypePluralQueries ObjectTypePlural = `queries`

func (*ObjectTypePlural) Set

func (f *ObjectTypePlural) Set(v string) error

Set raw string value and validate it against allowed values

func (*ObjectTypePlural) String

func (f *ObjectTypePlural) String() string

String representation for fmt.Print

func (*ObjectTypePlural) Type

func (f *ObjectTypePlural) Type() string

Type always returns ObjectTypePlural to satisfy [pflag.Value] interface

type OdbcParams

type OdbcParams struct {
	Hostname string `json:"hostname,omitempty"`

	Path string `json:"path,omitempty"`

	Port int `json:"port,omitempty"`

	Protocol string `json:"protocol,omitempty"`
}

type OwnableObjectType

type OwnableObjectType string

The singular form of the type of object which can be owned.

const OwnableObjectTypeAlert OwnableObjectType = `alert`
const OwnableObjectTypeDashboard OwnableObjectType = `dashboard`
const OwnableObjectTypeQuery OwnableObjectType = `query`

func (*OwnableObjectType) Set

func (f *OwnableObjectType) Set(v string) error

Set raw string value and validate it against allowed values

func (*OwnableObjectType) String

func (f *OwnableObjectType) String() string

String representation for fmt.Print

func (*OwnableObjectType) Type

func (f *OwnableObjectType) Type() string

Type always returns OwnableObjectType to satisfy [pflag.Value] interface

type Parameter

type Parameter struct {
	// The literal parameter marker that appears between double curly braces in
	// the query text.
	Name string `json:"name,omitempty"`
	// The text displayed in a parameter picking widget.
	Title string `json:"title,omitempty"`
	// Parameters can have several different types.
	Type ParameterType `json:"type,omitempty"`
	// The default value for this parameter.
	Value any `json:"value,omitempty"`
}

type ParameterType

type ParameterType string

Parameters can have several different types.

const ParameterTypeDatetime ParameterType = `datetime`
const ParameterTypeNumber ParameterType = `number`
const ParameterTypeText ParameterType = `text`

func (*ParameterType) Set

func (f *ParameterType) Set(v string) error

Set raw string value and validate it against allowed values

func (*ParameterType) String

func (f *ParameterType) String() string

String representation for fmt.Print

func (*ParameterType) Type

func (f *ParameterType) Type() string

Type always returns ParameterType to satisfy [pflag.Value] interface

type PermissionLevel

type PermissionLevel string

This describes an enum

const PermissionLevelCanManage PermissionLevel = `CAN_MANAGE`

Can manage the query

const PermissionLevelCanRun PermissionLevel = `CAN_RUN`

Can run the query

const PermissionLevelCanView PermissionLevel = `CAN_VIEW`

Can view the query

func (*PermissionLevel) Set

func (f *PermissionLevel) Set(v string) error

Set raw string value and validate it against allowed values

func (*PermissionLevel) String

func (f *PermissionLevel) String() string

String representation for fmt.Print

func (*PermissionLevel) Type

func (f *PermissionLevel) Type() string

Type always returns PermissionLevel to satisfy [pflag.Value] interface

type PlansState

type PlansState string

Whether plans exist for the execution, or the reason why they are missing

const PlansStateEmpty PlansState = `EMPTY`
const PlansStateExists PlansState = `EXISTS`
const PlansStateIgnoredLargePlansSize PlansState = `IGNORED_LARGE_PLANS_SIZE`
const PlansStateIgnoredSmallDuration PlansState = `IGNORED_SMALL_DURATION`
const PlansStateIgnoredSparkPlanType PlansState = `IGNORED_SPARK_PLAN_TYPE`
const PlansStateUnknown PlansState = `UNKNOWN`

func (*PlansState) Set

func (f *PlansState) Set(v string) error

Set raw string value and validate it against allowed values

func (*PlansState) String

func (f *PlansState) String() string

String representation for fmt.Print

func (*PlansState) Type

func (f *PlansState) Type() string

Type always returns PlansState to satisfy [pflag.Value] interface

type QueriesAPI

type QueriesAPI struct {
	// contains filtered or unexported fields
}

These endpoints are used for CRUD operations on query definitions. Query definitions include the target SQL warehouse, query text, name, description, tags, parameters, and visualizations. Queries can be scheduled using the `sql_task` type of the Jobs API, e.g. :method:jobs/create.

func NewQueries

func NewQueries(client *client.DatabricksClient) *QueriesAPI

func (*QueriesAPI) Create

func (a *QueriesAPI) Create(ctx context.Context, request QueryPostContent) (*Query, error)

Create a new query definition.

Creates a new query definition. Queries created with this endpoint belong to the authenticated user making the request.

The `data_source_id` field specifies the ID of the SQL warehouse to run this query against. You can use the Data Sources API to see a complete list of available SQL warehouses. Or you can copy the `data_source_id` from an existing query.

**Note**: You cannot add a visualization until you create the query.

Example (Alerts)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

srcs, err := w.DataSources.List(ctx)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", srcs)

query, err := w.Queries.Create(ctx, sql.QueryPostContent{
	Name:         fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	DataSourceId: srcs[0].Id,
	Description:  "test query from Go SDK",
	Query:        "SELECT 1",
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", query)

// cleanup

err = w.Queries.DeleteByQueryId(ctx, query.Id)
if err != nil {
	panic(err)
}
Output:

Example (Queries)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

srcs, err := w.DataSources.List(ctx)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", srcs)

query, err := w.Queries.Create(ctx, sql.QueryPostContent{
	Name:         fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	DataSourceId: srcs[0].Id,
	Description:  "test query from Go SDK",
	Query:        "SHOW TABLES",
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", query)

// cleanup

err = w.Queries.DeleteByQueryId(ctx, query.Id)
if err != nil {
	panic(err)
}
Output:

func (*QueriesAPI) Delete

func (a *QueriesAPI) Delete(ctx context.Context, request DeleteQueryRequest) error

Delete a query.

Moves a query to the trash. Trashed queries immediately disappear from searches and list views, and they cannot be used for alerts. The trash is deleted after 30 days.

func (*QueriesAPI) DeleteByQueryId

func (a *QueriesAPI) DeleteByQueryId(ctx context.Context, queryId string) error

Delete a query.

Moves a query to the trash. Trashed queries immediately disappear from searches and list views, and they cannot be used for alerts. The trash is deleted after 30 days.

func (*QueriesAPI) Get

func (a *QueriesAPI) Get(ctx context.Context, request GetQueryRequest) (*Query, error)

Get a query definition.

Retrieve a query object definition along with contextual permissions information about the currently authenticated user.

Example (Queries)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

srcs, err := w.DataSources.List(ctx)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", srcs)

query, err := w.Queries.Create(ctx, sql.QueryPostContent{
	Name:         fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	DataSourceId: srcs[0].Id,
	Description:  "test query from Go SDK",
	Query:        "SHOW TABLES",
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", query)

byId, err := w.Queries.GetByQueryId(ctx, query.Id)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", byId)

// cleanup

err = w.Queries.DeleteByQueryId(ctx, query.Id)
if err != nil {
	panic(err)
}
Output:

func (*QueriesAPI) GetByName

func (a *QueriesAPI) GetByName(ctx context.Context, name string) (*Query, error)

GetByName calls QueriesAPI.QueryNameToIdMap and returns a single Query.

Returns an error if there's more than one Query with the same .Name.

Note: All Query instances are loaded into memory before returning matching by name.

This method is generated by Databricks SDK Code Generator.

func (*QueriesAPI) GetByQueryId

func (a *QueriesAPI) GetByQueryId(ctx context.Context, queryId string) (*Query, error)

Get a query definition.

Retrieve a query object definition along with contextual permissions information about the currently authenticated user.

func (*QueriesAPI) Impl

func (a *QueriesAPI) Impl() QueriesService

Impl returns low-level Queries API implementation

func (*QueriesAPI) ListAll

func (a *QueriesAPI) ListAll(ctx context.Context, request ListQueriesRequest) ([]Query, error)

Get a list of queries.

Gets a list of queries. Optionally, this list can be filtered by a search term.

This method is generated by Databricks SDK Code Generator.

func (*QueriesAPI) QueryNameToIdMap

func (a *QueriesAPI) QueryNameToIdMap(ctx context.Context, request ListQueriesRequest) (map[string]string, error)

QueryNameToIdMap calls QueriesAPI.ListAll and creates a map of results with Query.Name as key and Query.Id as value.

Returns an error if there's more than one Query with the same .Name.

Note: All Query instances are loaded into memory before creating a map.

This method is generated by Databricks SDK Code Generator.

func (*QueriesAPI) Restore

func (a *QueriesAPI) Restore(ctx context.Context, request RestoreQueryRequest) error

Restore a query.

Restore a query that has been moved to the trash. A restored query appears in list views and searches. You can use restored queries for alerts.

func (*QueriesAPI) Update

func (a *QueriesAPI) Update(ctx context.Context, request QueryEditContent) (*Query, error)

Change a query definition.

Modify this query definition.

**Note**: You cannot undo this operation.

Example (Queries)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

srcs, err := w.DataSources.List(ctx)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", srcs)

query, err := w.Queries.Create(ctx, sql.QueryPostContent{
	Name:         fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	DataSourceId: srcs[0].Id,
	Description:  "test query from Go SDK",
	Query:        "SHOW TABLES",
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", query)

updated, err := w.Queries.Update(ctx, sql.QueryEditContent{
	QueryId:      query.Id,
	Name:         fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	DataSourceId: srcs[0].Id,
	Description:  "UPDATED: test query from Go SDK",
	Query:        "SELECT 2+2",
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", updated)

// cleanup

err = w.Queries.DeleteByQueryId(ctx, query.Id)
if err != nil {
	panic(err)
}
Output:

func (*QueriesAPI) WithImpl

func (a *QueriesAPI) WithImpl(impl QueriesService) *QueriesAPI

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks.

type QueriesService

type QueriesService interface {

	// Create a new query definition.
	//
	// Creates a new query definition. Queries created with this endpoint belong
	// to the authenticated user making the request.
	//
	// The `data_source_id` field specifies the ID of the SQL warehouse to run
	// this query against. You can use the Data Sources API to see a complete
	// list of available SQL warehouses. Or you can copy the `data_source_id`
	// from an existing query.
	//
	// **Note**: You cannot add a visualization until you create the query.
	Create(ctx context.Context, request QueryPostContent) (*Query, error)

	// Delete a query.
	//
	// Moves a query to the trash. Trashed queries immediately disappear from
	// searches and list views, and they cannot be used for alerts. The trash is
	// deleted after 30 days.
	Delete(ctx context.Context, request DeleteQueryRequest) error

	// Get a query definition.
	//
	// Retrieve a query object definition along with contextual permissions
	// information about the currently authenticated user.
	Get(ctx context.Context, request GetQueryRequest) (*Query, error)

	// Get a list of queries.
	//
	// Gets a list of queries. Optionally, this list can be filtered by a search
	// term.
	//
	// Use ListAll() to get all Query instances, which will iterate over every result page.
	List(ctx context.Context, request ListQueriesRequest) (*QueryList, error)

	// Restore a query.
	//
	// Restore a query that has been moved to the trash. A restored query
	// appears in list views and searches. You can use restored queries for
	// alerts.
	Restore(ctx context.Context, request RestoreQueryRequest) error

	// Change a query definition.
	//
	// Modify this query definition.
	//
	// **Note**: You cannot undo this operation.
	Update(ctx context.Context, request QueryEditContent) (*Query, error)
}

These endpoints are used for CRUD operations on query definitions. Query definitions include the target SQL warehouse, query text, name, description, tags, parameters, and visualizations. Queries can be scheduled using the `sql_task` type of the Jobs API, e.g. :method:jobs/create.

type Query

type Query struct {
	// Describes whether the authenticated user is allowed to edit the
	// definition of this query.
	CanEdit bool `json:"can_edit,omitempty"`
	// The timestamp when this query was created.
	CreatedAt string `json:"created_at,omitempty"`
	// Data source ID.
	DataSourceId string `json:"data_source_id,omitempty"`
	// General description that conveys additional information about this query
	// such as usage notes.
	Description string `json:"description,omitempty"`
	// Query ID.
	Id string `json:"id,omitempty"`
	// Indicates whether the query is trashed. Trashed queries can't be used in
	// dashboards, or appear in search results. If this boolean is `true`, the
	// `options` property for this query includes a `moved_to_trash_at`
	// timestamp. Trashed queries are permanently deleted after 30 days.
	IsArchived bool `json:"is_archived,omitempty"`
	// Whether the query is a draft. Draft queries only appear in list views for
	// their owners. Visualizations from draft queries cannot appear on
	// dashboards.
	IsDraft bool `json:"is_draft,omitempty"`
	// Whether this query object appears in the current user's favorites list.
	// This flag determines whether the star icon for favorites is selected.
	IsFavorite bool `json:"is_favorite,omitempty"`
	// Text parameter types are not safe from SQL injection for all types of
	// data source. Set this Boolean parameter to `true` if a query either does
	// not use any text type parameters or uses a data source type where text
	// type parameters are handled safely.
	IsSafe bool `json:"is_safe,omitempty"`

	LastModifiedBy *User `json:"last_modified_by,omitempty"`
	// The ID of the user who last saved changes to this query.
	LastModifiedById int `json:"last_modified_by_id,omitempty"`
	// If there is a cached result for this query and user, this field includes
	// the query result ID. If this query uses parameters, this field is always
	// null.
	LatestQueryDataId string `json:"latest_query_data_id,omitempty"`
	// The title of this query that appears in list views, widget headings, and
	// on the query page.
	Name string `json:"name,omitempty"`

	Options *QueryOptions `json:"options,omitempty"`
	// The identifier of the workspace folder containing the object.
	Parent string `json:"parent,omitempty"`
	// This describes an enum
	PermissionTier PermissionLevel `json:"permission_tier,omitempty"`
	// The text of the query to be run.
	Query string `json:"query,omitempty"`
	// A SHA-256 hash of the query text along with the authenticated user ID.
	QueryHash string `json:"query_hash,omitempty"`

	Tags []string `json:"tags,omitempty"`
	// The timestamp at which this query was last updated.
	UpdatedAt string `json:"updated_at,omitempty"`

	User *User `json:"user,omitempty"`
	// The ID of the user who created this query.
	UserId int `json:"user_id,omitempty"`

	Visualizations []Visualization `json:"visualizations,omitempty"`
}

type QueryEditContent added in v0.3.0

type QueryEditContent struct {
	// Data source ID.
	DataSourceId string `json:"data_source_id,omitempty"`
	// General description that conveys additional information about this query
	// such as usage notes.
	Description string `json:"description,omitempty"`
	// The title of this query that appears in list views, widget headings, and
	// on the query page.
	Name string `json:"name,omitempty"`
	// Exclusively used for storing a list parameter definitions. A parameter is
	// an object with `title`, `name`, `type`, and `value` properties. The
	// `value` field here is the default value. It can be overridden at runtime.
	Options any `json:"options,omitempty"`
	// The text of the query to be run.
	Query string `json:"query,omitempty"`

	QueryId string `json:"-" url:"-"`
}

type QueryFilter

type QueryFilter struct {
	QueryStartTimeRange *TimeRange `json:"query_start_time_range,omitempty"`

	Statuses []QueryStatus `json:"statuses,omitempty"`
	// A list of user IDs who ran the queries.
	UserIds []int `json:"user_ids,omitempty"`
	// A list of warehouse IDs.
	WarehouseIds []string `json:"warehouse_ids,omitempty"`
}

A filter to limit query history results. This field is optional.

type QueryHistoryAPI

type QueryHistoryAPI struct {
	// contains filtered or unexported fields
}

Access the history of queries through SQL warehouses.

func NewQueryHistory

func NewQueryHistory(client *client.DatabricksClient) *QueryHistoryAPI

func (*QueryHistoryAPI) Impl

Impl returns low-level QueryHistory API implementation

func (*QueryHistoryAPI) ListAll

func (a *QueryHistoryAPI) ListAll(ctx context.Context, request ListQueryHistoryRequest) ([]QueryInfo, error)

List Queries.

List the history of queries through SQL warehouses.

You can filter by user ID, warehouse ID, status, and time range.

This method is generated by Databricks SDK Code Generator.

func (*QueryHistoryAPI) WithImpl

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks.

type QueryHistoryService

type QueryHistoryService interface {

	// List Queries.
	//
	// List the history of queries through SQL warehouses.
	//
	// You can filter by user ID, warehouse ID, status, and time range.
	//
	// Use ListAll() to get all QueryInfo instances, which will iterate over every result page.
	List(ctx context.Context, request ListQueryHistoryRequest) (*ListQueriesResponse, error)
}

Access the history of queries through SQL warehouses.

type QueryInfo

type QueryInfo struct {
	// Channel information for the SQL warehouse at the time of query execution
	ChannelUsed *ChannelInfo `json:"channel_used,omitempty"`
	// Total execution time of the query from the client’s point of view, in
	// milliseconds.
	Duration int `json:"duration,omitempty"`
	// Alias for `warehouse_id`.
	EndpointId string `json:"endpoint_id,omitempty"`
	// Message describing why the query could not complete.
	ErrorMessage string `json:"error_message,omitempty"`
	// The ID of the user whose credentials were used to run the query.
	ExecutedAsUserId int `json:"executed_as_user_id,omitempty"`
	// The email address or username of the user whose credentials were used to
	// run the query.
	ExecutedAsUserName string `json:"executed_as_user_name,omitempty"`
	// The time execution of the query ended.
	ExecutionEndTimeMs int `json:"execution_end_time_ms,omitempty"`
	// Whether more updates for the query are expected.
	IsFinal bool `json:"is_final,omitempty"`
	// A key that can be used to look up query details.
	LookupKey string `json:"lookup_key,omitempty"`
	// Metrics about query execution.
	Metrics *QueryMetrics `json:"metrics,omitempty"`
	// Whether plans exist for the execution, or the reason why they are missing
	PlansState PlansState `json:"plans_state,omitempty"`
	// The time the query ended.
	QueryEndTimeMs int `json:"query_end_time_ms,omitempty"`
	// The query ID.
	QueryId string `json:"query_id,omitempty"`
	// The time the query started.
	QueryStartTimeMs int `json:"query_start_time_ms,omitempty"`
	// The text of the query.
	QueryText string `json:"query_text,omitempty"`
	// The number of results returned by the query.
	RowsProduced int `json:"rows_produced,omitempty"`
	// URL to the query plan.
	SparkUiUrl string `json:"spark_ui_url,omitempty"`
	// Type of statement for this query
	StatementType QueryStatementType `json:"statement_type,omitempty"`
	// This describes an enum
	Status QueryStatus `json:"status,omitempty"`
	// The ID of the user who ran the query.
	UserId int `json:"user_id,omitempty"`
	// The email address or username of the user who ran the query.
	UserName string `json:"user_name,omitempty"`
	// Warehouse ID.
	WarehouseId string `json:"warehouse_id,omitempty"`
}

type QueryList

// QueryList is one page of a paginated listing of queries.
type QueryList struct {
	// The total number of queries.
	Count int `json:"count,omitempty"`
	// The page number that is currently displayed.
	Page int `json:"page,omitempty"`
	// The number of queries per page.
	PageSize int `json:"page_size,omitempty"`
	// List of queries returned.
	Results []Query `json:"results,omitempty"`
}

type QueryMetrics

// QueryMetrics holds timing, volume, and caching metrics collected for a
// single query execution. All durations are in milliseconds and all sizes in
// bytes unless noted otherwise.
type QueryMetrics struct {
	// Time spent loading metadata and optimizing the query, in milliseconds.
	CompilationTimeMs int `json:"compilation_time_ms,omitempty"`
	// Time spent executing the query, in milliseconds.
	ExecutionTimeMs int `json:"execution_time_ms,omitempty"`
	// Total amount of data sent over the network between executor nodes during
	// shuffle, in bytes.
	NetworkSentBytes int `json:"network_sent_bytes,omitempty"`
	// Total execution time for all individual Photon query engine tasks in the
	// query, in milliseconds.
	PhotonTotalTimeMs int `json:"photon_total_time_ms,omitempty"`
	// Time spent waiting to execute the query because the SQL warehouse is
	// already running the maximum number of concurrent queries, in
	// milliseconds.
	QueuedOverloadTimeMs int `json:"queued_overload_time_ms,omitempty"`
	// Time waiting for compute resources to be provisioned for the SQL
	// warehouse, in milliseconds.
	QueuedProvisioningTimeMs int `json:"queued_provisioning_time_ms,omitempty"`
	// Total size of data read by the query, in bytes.
	ReadBytes int `json:"read_bytes,omitempty"`
	// Size of persistent data read from the cache, in bytes.
	ReadCacheBytes int `json:"read_cache_bytes,omitempty"`
	// Number of files read after pruning.
	ReadFilesCount int `json:"read_files_count,omitempty"`
	// Number of partitions read after pruning.
	ReadPartitionsCount int `json:"read_partitions_count,omitempty"`
	// Size of persistent data read from cloud object storage on your cloud
	// tenant, in bytes.
	ReadRemoteBytes int `json:"read_remote_bytes,omitempty"`
	// Time spent fetching the query results after the execution finished, in
	// milliseconds.
	ResultFetchTimeMs int `json:"result_fetch_time_ms,omitempty"`
	// true if the query result was fetched from cache, false otherwise.
	ResultFromCache bool `json:"result_from_cache,omitempty"`
	// Total number of rows returned by the query.
	RowsProducedCount int `json:"rows_produced_count,omitempty"`
	// Total number of rows read by the query.
	RowsReadCount int `json:"rows_read_count,omitempty"`
	// Size of data temporarily written to disk while executing the query, in
	// bytes.
	SpillToDiskBytes int `json:"spill_to_disk_bytes,omitempty"`
	// Sum of execution time for all of the query's tasks, in milliseconds.
	TaskTotalTimeMs int `json:"task_total_time_ms,omitempty"`
	// Number of files that would have been read without pruning.
	TotalFilesCount int `json:"total_files_count,omitempty"`
	// Number of partitions that would have been read without pruning.
	TotalPartitionsCount int `json:"total_partitions_count,omitempty"`
	// Total execution time of the query from the client's point of view, in
	// milliseconds.
	TotalTimeMs int `json:"total_time_ms,omitempty"`
	// Size of persistent data written to cloud object storage in your cloud
	// tenant, in bytes.
	WriteRemoteBytes int `json:"write_remote_bytes,omitempty"`
}

Metrics about query execution.

type QueryOptions

// QueryOptions carries auxiliary settings attached to a saved query.
type QueryOptions struct {
	// The timestamp when this query was moved to trash. Only present when the
	// `is_archived` property is `true`. Trashed items are deleted after thirty
	// days.
	MovedToTrashAt string `json:"moved_to_trash_at,omitempty"`

	// Parameter definitions associated with this query.
	Parameters []Parameter `json:"parameters,omitempty"`
}

type QueryPostContent

// QueryPostContent is the payload for creating or updating a query.
type QueryPostContent struct {
	// Data source ID.
	DataSourceId string `json:"data_source_id,omitempty"`
	// General description that conveys additional information about this query
	// such as usage notes.
	Description string `json:"description,omitempty"`
	// The title of this query that appears in list views, widget headings, and
	// on the query page.
	Name string `json:"name,omitempty"`
	// Exclusively used for storing a list of parameter definitions. A
	// parameter is an object with `title`, `name`, `type`, and `value`
	// properties. The `value` field here is the default value. It can be
	// overridden at runtime.
	Options any `json:"options,omitempty"`
	// The identifier of the workspace folder containing the object.
	Parent string `json:"parent,omitempty"`
	// The text of the query to be run.
	Query string `json:"query,omitempty"`
}

type QueryStatementType

// QueryStatementType is the type of SQL statement executed by a query.
type QueryStatementType string

// Allowed values for QueryStatementType.
const (
	QueryStatementTypeAlter    QueryStatementType = `ALTER`
	QueryStatementTypeAnalyze  QueryStatementType = `ANALYZE`
	QueryStatementTypeCopy     QueryStatementType = `COPY`
	QueryStatementTypeCreate   QueryStatementType = `CREATE`
	QueryStatementTypeDelete   QueryStatementType = `DELETE`
	QueryStatementTypeDescribe QueryStatementType = `DESCRIBE`
	QueryStatementTypeDrop     QueryStatementType = `DROP`
	QueryStatementTypeExplain  QueryStatementType = `EXPLAIN`
	QueryStatementTypeGrant    QueryStatementType = `GRANT`
	QueryStatementTypeInsert   QueryStatementType = `INSERT`
	QueryStatementTypeMerge    QueryStatementType = `MERGE`
	QueryStatementTypeOptimize QueryStatementType = `OPTIMIZE`
	QueryStatementTypeOther    QueryStatementType = `OTHER`
	QueryStatementTypeRefresh  QueryStatementType = `REFRESH`
	QueryStatementTypeReplace  QueryStatementType = `REPLACE`
	QueryStatementTypeRevoke   QueryStatementType = `REVOKE`
	QueryStatementTypeSelect   QueryStatementType = `SELECT`
	QueryStatementTypeSet      QueryStatementType = `SET`
	QueryStatementTypeShow     QueryStatementType = `SHOW`
	QueryStatementTypeTruncate QueryStatementType = `TRUNCATE`
	QueryStatementTypeUpdate   QueryStatementType = `UPDATE`
	QueryStatementTypeUse      QueryStatementType = `USE`
)

func (*QueryStatementType) Set

func (f *QueryStatementType) Set(v string) error

Set raw string value and validate it against allowed values

func (*QueryStatementType) String

func (f *QueryStatementType) String() string

String representation for fmt.Print

func (*QueryStatementType) Type

func (f *QueryStatementType) Type() string

Type always returns QueryStatementType to satisfy [pflag.Value] interface

type QueryStatus

// QueryStatus describes the execution state of a query.
type QueryStatus string

const (
	// QueryStatusCanceled indicates the query was cancelled by the user.
	QueryStatusCanceled QueryStatus = `CANCELED`
	// QueryStatusFailed indicates the query has failed.
	QueryStatusFailed QueryStatus = `FAILED`
	// QueryStatusFinished indicates the query has completed.
	QueryStatusFinished QueryStatus = `FINISHED`
	// QueryStatusQueued indicates the query has been received and queued.
	QueryStatusQueued QueryStatus = `QUEUED`
	// QueryStatusRunning indicates the query has started.
	QueryStatusRunning QueryStatus = `RUNNING`
)

func (*QueryStatus) Set

func (f *QueryStatus) Set(v string) error

Set raw string value and validate it against allowed values

func (*QueryStatus) String

func (f *QueryStatus) String() string

String representation for fmt.Print

func (*QueryStatus) Type

func (f *QueryStatus) Type() string

Type always returns QueryStatus to satisfy [pflag.Value] interface

type RepeatedEndpointConfPairs

// RepeatedEndpointConfPairs carries endpoint configuration pairs in both the
// legacy and current wire formats.
type RepeatedEndpointConfPairs struct {
	// Deprecated: Use ConfigurationPairs instead.
	ConfigPair []EndpointConfPair `json:"config_pair,omitempty"`

	// Configuration pairs to apply.
	ConfigurationPairs []EndpointConfPair `json:"configuration_pairs,omitempty"`
}

type RestoreDashboardRequest

// RestoreDashboardRequest is the request to restore a dashboard.
type RestoreDashboardRequest struct {
	// ID of the dashboard to restore. Excluded from the JSON body and query
	// string (see the `json:"-"` and `url:"-"` tags).
	DashboardId string `json:"-" url:"-"`
}

Restore a dashboard

type RestoreQueryRequest

// RestoreQueryRequest is the request to restore a query.
type RestoreQueryRequest struct {
	// ID of the query to restore. Excluded from the JSON body and query
	// string (see the `json:"-"` and `url:"-"` tags).
	QueryId string `json:"-" url:"-"`
}

Restore a query

type ResultData added in v0.3.0

// ResultData is one chunk of statement execution results. Data is delivered
// either inline (in `data_array`, `INLINE` disposition) or as external links
// (in `external_links`, `EXTERNAL_LINKS` disposition); exactly one of the two
// is set.
type ResultData struct {
	// Number of bytes in the result chunk.
	ByteCount int64 `json:"byte_count,omitempty"`
	// Position within the sequence of result set chunks.
	ChunkIndex int `json:"chunk_index,omitempty"`
	// `JSON_ARRAY` format is an array of arrays of values, where each non-null
	// value is formatted as a string. Null values are encoded as JSON `null`.
	DataArray [][]string `json:"data_array,omitempty"`

	// Links to externally hosted result data (`EXTERNAL_LINKS` disposition).
	ExternalLinks []ExternalLink `json:"external_links,omitempty"`
	// When fetching, gives `chunk_index` for the _next_ chunk; if absent,
	// indicates there are no more chunks.
	NextChunkIndex int `json:"next_chunk_index,omitempty"`
	// When fetching, gives `internal_link` for the _next_ chunk; if absent,
	// indicates there are no more chunks.
	NextChunkInternalLink string `json:"next_chunk_internal_link,omitempty"`
	// Number of rows within the result chunk.
	RowCount int64 `json:"row_count,omitempty"`
	// Starting row offset within the result set.
	RowOffset int64 `json:"row_offset,omitempty"`
}

Result data chunks are delivered in either the `chunk` field when using `INLINE` disposition, or in the `external_link` field when using `EXTERNAL_LINKS` disposition. Exactly one of these will be set.

type ResultManifest added in v0.3.0

// ResultManifest provides the schema and metadata for the result set.
type ResultManifest struct {
	// Array of result set chunk metadata.
	Chunks []ChunkInfo `json:"chunks,omitempty"`
	// Statement execution supports three result formats: `JSON_ARRAY`
	// (default), `ARROW_STREAM`, and `CSV`.
	//
	// When specifying `format=JSON_ARRAY`, result data will be formatted as an
	// array of arrays of values, where each value is either the *string
	// representation* of a value, or `null`. For example, the output of `SELECT
	// concat('id-', id) AS strCol, id AS intCol, null AS nullCol FROM range(3)`
	// would look like this:
	//
	// ``` [ [ "id-1", "1", null ], [ "id-2", "2", null ], [ "id-3", "3", null
	// ], ] ```
	//
	// `JSON_ARRAY` is supported with `INLINE` and `EXTERNAL_LINKS`
	// dispositions.
	//
	// `INLINE` `JSON_ARRAY` data can be found at the path
	// `StatementResponse.result.data_array`.
	//
	// For `EXTERNAL_LINKS` `JSON_ARRAY` results, each URL points to a file in
	// cloud storage that contains compact JSON with no indentation or extra
	// whitespace.
	//
	// When specifying `format=ARROW_STREAM`, each chunk in the result will be
	// formatted as Apache Arrow Stream. See the [Apache Arrow streaming
	// format].
	//
	// IMPORTANT: The format `ARROW_STREAM` is supported only with
	// `EXTERNAL_LINKS` disposition.
	//
	// When specifying `format=CSV`, each chunk in the result will be a CSV
	// according to [RFC 4180] standard. All the columns values will have
	// *string representation* similar to the `JSON_ARRAY` format, and `null`
	// values will be encoded as "null". Only the first chunk in the result
	// would contain a header row with column names. For example, the output of
	// `SELECT concat('id-', id) AS strCol, id AS intCol, null as nullCol FROM
	// range(3)` would look like this:
	//
	// ``` strCol,intCol,nullCol id-1,1,null id-2,2,null id-3,3,null ```
	//
	// IMPORTANT: The format `CSV` is supported only with `EXTERNAL_LINKS`
	// disposition.
	//
	// [Apache Arrow streaming format]: https://arrow.apache.org/docs/format/Columnar.html#ipc-streaming-format
	// [RFC 4180]: https://www.rfc-editor.org/rfc/rfc4180
	Format Format `json:"format,omitempty"`
	// Schema is an ordered list of column descriptions.
	Schema *ResultSchema `json:"schema,omitempty"`
	// Total number of bytes in the result set.
	TotalByteCount int64 `json:"total_byte_count,omitempty"`
	// Total number of chunks that the result set has been divided into.
	TotalChunkCount int `json:"total_chunk_count,omitempty"`
	// Total number of rows in the result set.
	TotalRowCount int64 `json:"total_row_count,omitempty"`
}

The result manifest provides schema and metadata for the result set.

type ResultSchema added in v0.3.0

// ResultSchema is an ordered list of column descriptions for a result set.
type ResultSchema struct {
	// Number of columns in the schema.
	ColumnCount int `json:"column_count,omitempty"`

	// Ordered column descriptions.
	Columns []ColumnInfo `json:"columns,omitempty"`
}

Schema is an ordered list of column descriptions.

type ServiceError added in v0.3.0

// ServiceError describes an error returned by the service.
type ServiceError struct {
	// Code classifying the error condition.
	ErrorCode ServiceErrorCode `json:"error_code,omitempty"`
	// Brief summary of error condition.
	Message string `json:"message,omitempty"`
}

type ServiceErrorCode added in v0.3.0

// ServiceErrorCode identifies the class of error returned by the service.
type ServiceErrorCode string

// Allowed values for ServiceErrorCode.
const (
	ServiceErrorCodeAborted                         ServiceErrorCode = `ABORTED`
	ServiceErrorCodeAlreadyExists                   ServiceErrorCode = `ALREADY_EXISTS`
	ServiceErrorCodeBadRequest                      ServiceErrorCode = `BAD_REQUEST`
	ServiceErrorCodeCancelled                       ServiceErrorCode = `CANCELLED`
	ServiceErrorCodeDeadlineExceeded                ServiceErrorCode = `DEADLINE_EXCEEDED`
	ServiceErrorCodeInternalError                   ServiceErrorCode = `INTERNAL_ERROR`
	ServiceErrorCodeIoError                         ServiceErrorCode = `IO_ERROR`
	ServiceErrorCodeNotFound                        ServiceErrorCode = `NOT_FOUND`
	ServiceErrorCodeResourceExhausted               ServiceErrorCode = `RESOURCE_EXHAUSTED`
	ServiceErrorCodeServiceUnderMaintenance         ServiceErrorCode = `SERVICE_UNDER_MAINTENANCE`
	ServiceErrorCodeTemporarilyUnavailable          ServiceErrorCode = `TEMPORARILY_UNAVAILABLE`
	ServiceErrorCodeUnauthenticated                 ServiceErrorCode = `UNAUTHENTICATED`
	ServiceErrorCodeUnknown                         ServiceErrorCode = `UNKNOWN`
	ServiceErrorCodeWorkspaceTemporarilyUnavailable ServiceErrorCode = `WORKSPACE_TEMPORARILY_UNAVAILABLE`
)

func (*ServiceErrorCode) Set added in v0.3.0

func (f *ServiceErrorCode) Set(v string) error

Set raw string value and validate it against allowed values

func (*ServiceErrorCode) String added in v0.3.0

func (f *ServiceErrorCode) String() string

String representation for fmt.Print

func (*ServiceErrorCode) Type added in v0.3.0

func (f *ServiceErrorCode) Type() string

Type always returns ServiceErrorCode to satisfy [pflag.Value] interface

type SetRequest

// SetRequest is the request to set an object's access control list (ACL).
type SetRequest struct {
	// ACL entries to apply to the object.
	AccessControlList []AccessControl `json:"access_control_list,omitempty"`
	// Object ID. The ACL for the object with this UUID is overwritten by this
	// request's POST content.
	ObjectId string `json:"-" url:"-"`
	// The type of object permission to set.
	ObjectType ObjectTypePlural `json:"-" url:"-"`
}

Set object ACL

type SetResponse

// SetResponse is the response returned after setting an object's ACL.
type SetResponse struct {
	// The ACL entries now in effect for the object.
	AccessControlList []AccessControl `json:"access_control_list,omitempty"`
	// An object's type and UUID, separated by a forward slash (/) character.
	ObjectId string `json:"object_id,omitempty"`
	// A singular noun object type.
	ObjectType ObjectType `json:"object_type,omitempty"`
}

type SetWorkspaceWarehouseConfigRequest

// SetWorkspaceWarehouseConfigRequest is the request to set the workspace-level
// configuration shared by all SQL warehouses.
type SetWorkspaceWarehouseConfigRequest struct {
	// Optional: Channel selection details
	Channel *Channel `json:"channel,omitempty"`
	// Deprecated: Use SqlConfigurationParameters instead.
	ConfigParam *RepeatedEndpointConfPairs `json:"config_param,omitempty"`
	// Spark confs for external hive metastore configuration. The
	// JSON-serialized size must be <= 512K.
	DataAccessConfig []EndpointConfPair `json:"data_access_config,omitempty"`
	// List of Warehouse Types allowed in this workspace (limits allowed value
	// of the type field in CreateWarehouse and EditWarehouse). Note: Some types
	// cannot be disabled, they don't need to be specified in
	// SetWorkspaceWarehouseConfig. Note: Disabling a type may cause existing
	// warehouses to be converted to another type. Used by frontend to save
	// specific type availability in the warehouse create and edit form UI.
	EnabledWarehouseTypes []WarehouseTypePair `json:"enabled_warehouse_types,omitempty"`
	// Deprecated: Use SqlConfigurationParameters instead.
	GlobalParam *RepeatedEndpointConfPairs `json:"global_param,omitempty"`
	// GCP only: Google Service Account used to pass to cluster to access Google
	// Cloud Storage
	GoogleServiceAccount string `json:"google_service_account,omitempty"`
	// AWS Only: Instance profile used to pass IAM role to the cluster
	InstanceProfileArn string `json:"instance_profile_arn,omitempty"`
	// Security policy for warehouses
	SecurityPolicy SetWorkspaceWarehouseConfigRequestSecurityPolicy `json:"security_policy,omitempty"`
	// SQL configuration parameters
	SqlConfigurationParameters *RepeatedEndpointConfPairs `json:"sql_configuration_parameters,omitempty"`
}

type SetWorkspaceWarehouseConfigRequestSecurityPolicy

// SetWorkspaceWarehouseConfigRequestSecurityPolicy is the security policy for
// warehouses.
type SetWorkspaceWarehouseConfigRequestSecurityPolicy string

// Allowed values for SetWorkspaceWarehouseConfigRequestSecurityPolicy.
const (
	SetWorkspaceWarehouseConfigRequestSecurityPolicyDataAccessControl SetWorkspaceWarehouseConfigRequestSecurityPolicy = `DATA_ACCESS_CONTROL`
	SetWorkspaceWarehouseConfigRequestSecurityPolicyNone              SetWorkspaceWarehouseConfigRequestSecurityPolicy = `NONE`
	SetWorkspaceWarehouseConfigRequestSecurityPolicyPassthrough       SetWorkspaceWarehouseConfigRequestSecurityPolicy = `PASSTHROUGH`
)

func (*SetWorkspaceWarehouseConfigRequestSecurityPolicy) Set

Set raw string value and validate it against allowed values

func (*SetWorkspaceWarehouseConfigRequestSecurityPolicy) String

String representation for fmt.Print

func (*SetWorkspaceWarehouseConfigRequestSecurityPolicy) Type

Type always returns SetWorkspaceWarehouseConfigRequestSecurityPolicy to satisfy [pflag.Value] interface

type SpotInstancePolicy

// SpotInstancePolicy configures whether the warehouse should use spot
// instances.
type SpotInstancePolicy string

// Allowed values for SpotInstancePolicy.
const (
	SpotInstancePolicyCostOptimized        SpotInstancePolicy = `COST_OPTIMIZED`
	SpotInstancePolicyPolicyUnspecified    SpotInstancePolicy = `POLICY_UNSPECIFIED`
	SpotInstancePolicyReliabilityOptimized SpotInstancePolicy = `RELIABILITY_OPTIMIZED`
)

func (*SpotInstancePolicy) Set

func (f *SpotInstancePolicy) Set(v string) error

Set raw string value and validate it against allowed values

func (*SpotInstancePolicy) String

func (f *SpotInstancePolicy) String() string

String representation for fmt.Print

func (*SpotInstancePolicy) Type

func (f *SpotInstancePolicy) Type() string

Type always returns SpotInstancePolicy to satisfy [pflag.Value] interface

type StartRequest

// StartRequest is the request to start a SQL warehouse.
type StartRequest struct {
	// Required. Id of the SQL warehouse.
	Id string `json:"-" url:"-"`
}

Start a warehouse

type State

// State is the state of the warehouse.
type State string

// Allowed values for State.
const (
	StateDeleted  State = `DELETED`
	StateDeleting State = `DELETING`
	StateRunning  State = `RUNNING`
	StateStarting State = `STARTING`
	StateStopped  State = `STOPPED`
	StateStopping State = `STOPPING`
)

func (*State) Set

func (f *State) Set(v string) error

Set raw string value and validate it against allowed values

func (*State) String

func (f *State) String() string

String representation for fmt.Print

func (*State) Type

func (f *State) Type() string

Type always returns State to satisfy [pflag.Value] interface

type StatementExecutionAPI added in v0.3.0

// StatementExecutionAPI manages the execution of arbitrary SQL statements and
// the fetching of result data.
type StatementExecutionAPI struct {
	// contains filtered or unexported fields
}

The SQL Statement Execution API manages the execution of arbitrary SQL statements and the fetching of result data.

**Release status**

This feature is in Public Preview.

**Getting started**

We suggest beginning with the SQL Statement Execution API tutorial.

**Overview of statement execution and result fetching**

Statement execution begins by issuing a :method:statementexecution/executeStatement request with a valid SQL statement and warehouse ID, along with optional parameters such as the data catalog and output format.

When submitting the statement, the call can behave synchronously or asynchronously, based on the `wait_timeout` setting. When set between 5-50 seconds (default: 10) the call behaves synchronously and waits for results up to the specified timeout; when set to `0s`, the call is asynchronous and responds immediately with a statement ID that can be used to poll for status or fetch the results in a separate call.

**Call mode: synchronous**

In synchronous mode, when statement execution completes within the `wait timeout`, the result data is returned directly in the response. This response will contain `statement_id`, `status`, `manifest`, and `result` fields. The `status` field confirms success whereas the `manifest` field contains the result data column schema and metadata about the result set. The `result` field contains the first chunk of result data according to the specified `disposition`, and links to fetch any remaining chunks.

If the execution does not complete before `wait_timeout`, the setting `on_wait_timeout` determines how the system responds.

By default, `on_wait_timeout=CONTINUE`, and after reaching `wait_timeout`, a response is returned and statement execution continues asynchronously. The response will contain only `statement_id` and `status` fields, and the caller must now follow the flow described for asynchronous call mode to poll and fetch the result.

Alternatively, `on_wait_timeout` can also be set to `CANCEL`; in this case if the timeout is reached before execution completes, the underlying statement execution is canceled, and a `CANCELED` status is returned in the response.

**Call mode: asynchronous**

In asynchronous mode, or after a timed-out synchronous request continues, a `statement_id` and `status` will be returned. In this case polling :method:statementexecution/getStatement calls are required to fetch the result and metadata.

Next, a caller must poll until execution completes (`SUCCEEDED`, `FAILED`, etc.) by issuing :method:statementexecution/getStatement requests for the given `statement_id`.

When execution has succeeded, the response will contain `status`, `manifest`, and `result` fields. These fields and the structure are identical to those in the response to a successful synchronous submission. The `result` field will contain the first chunk of result data, either `INLINE` or as `EXTERNAL_LINKS` depending on `disposition`. Additional chunks of result data can be fetched by checking for the presence of the `next_chunk_internal_link` field, and iteratively `GET` those paths until that field is unset: `GET https://$DATABRICKS_HOST/{next_chunk_internal_link}`.

**Fetching result data: format and disposition**

To specify the result data format, set the `format` field to `JSON_ARRAY` (JSON), `ARROW_STREAM` (Apache Arrow Columnar), or `CSV`.

You can also configure how to fetch the result data in two different modes by setting the `disposition` field to `INLINE` or `EXTERNAL_LINKS`.

The `INLINE` disposition can only be used with the `JSON_ARRAY` format and allows results up to 16 MiB. When a statement executed with `INLINE` disposition exceeds this limit, the execution is aborted, and no result can be fetched.

The `EXTERNAL_LINKS` disposition allows fetching large result sets in `JSON_ARRAY`, `ARROW_STREAM` and `CSV` formats, and with higher throughput.

The API uses defaults of `format=JSON_ARRAY` and `disposition=INLINE`. Databricks recommends that you explicitly set the format and the disposition for all production use cases.

**Statement response: statement_id, status, manifest, and result**

The base call :method:statementexecution/getStatement returns a single response combining `statement_id`, `status`, a result `manifest`, and a `result` data chunk or link, depending on the `disposition`. The `manifest` contains the result schema definition and the result summary metadata. When using `disposition=EXTERNAL_LINKS`, it also contains a full listing of all chunks and their summary metadata.

**Use case: small result sets with INLINE + JSON_ARRAY**

For flows that generate small and predictable result sets (<= 16 MiB), `INLINE` downloads of `JSON_ARRAY` result data are typically the simplest way to execute and fetch result data.

When the result set with `disposition=INLINE` is larger, the result can be transferred in chunks. After receiving the initial chunk with :method:statementexecution/executeStatement or :method:statementexecution/getStatement subsequent calls are required to iteratively fetch each chunk. Each result response contains a link to the next chunk, when there are additional chunks to fetch; it can be found in the field `.next_chunk_internal_link`. This link is an absolute `path` to be joined with your `$DATABRICKS_HOST`, and of the form `/api/2.0/sql/statements/{statement_id}/result/chunks/{chunk_index}`. The next chunk can be fetched by issuing a :method:statementexecution/getStatementResultChunkN request.

When using this mode, each chunk may be fetched once, and in order. A chunk without a field `next_chunk_internal_link` indicates the last chunk was reached and all chunks have been fetched from the result set.

**Use case: large result sets with EXTERNAL_LINKS + ARROW_STREAM**

Using `EXTERNAL_LINKS` to fetch result data in Arrow format allows you to fetch large result sets efficiently. The primary difference from using `INLINE` disposition is that fetched result chunks contain resolved `external_links` URLs, which can be fetched with standard HTTP.

**Presigned URLs**

External links point to data stored within your workspace's internal DBFS, in the form of a presigned URL. The URLs are valid for only a short period, <= 15 minutes. Alongside each `external_link` is an expiration field indicating the time at which the URL is no longer valid. In `EXTERNAL_LINKS` mode, chunks can be resolved and fetched multiple times and in parallel.

----

### **Warning: We recommend you protect the URLs in the EXTERNAL_LINKS.**

When using the EXTERNAL_LINKS disposition, a short-lived pre-signed URL is generated, which the client can use to download the result chunk directly from cloud storage. As the short-lived credential is embedded in a pre-signed URL, this URL should be protected.

Since pre-signed URLs are generated with embedded temporary credentials, you need to remove the authorization header from the fetch requests.

----

Similar to `INLINE` mode, callers can iterate through the result set, by using the `next_chunk_internal_link` field. Each internal link response will contain an external link to the raw chunk data, and additionally contain the `next_chunk_internal_link` if there are more chunks.

Unlike `INLINE` mode, when using `EXTERNAL_LINKS`, chunks may be fetched out of order, and in parallel to achieve higher throughput.

**Limits and limitations**

Note: All byte limits are calculated based on internal storage metrics and will not match byte counts of actual payloads.

- Statements with `disposition=INLINE` are limited to 16 MiB and will abort when this limit is exceeded. - Statements with `disposition=EXTERNAL_LINKS` are limited to 100 GiB. - The maximum query text size is 16 MiB. - Cancelation may silently fail. A successful response from a cancel request indicates that the cancel request was successfully received and sent to the processing engine. However, for example, an outstanding statement may complete execution during signal delivery, with the cancel signal arriving too late to be meaningful. Polling for status until a terminal state is reached is a reliable way to determine the final state. - Wait timeouts are approximate, occur server-side, and cannot account for caller delays, network latency from caller to service, and similarly. - After a statement has been submitted and a statement_id is returned, that statement's status and result will automatically close after either of 2 conditions: - The last result chunk is fetched (or resolved to an external link). - One hour passes with no calls to get the status or fetch the result. Best practice: in asynchronous clients, poll for status regularly (and with backoff) to keep the statement open and alive. - After fetching the last result chunk (including chunk_index=0) the statement is automatically closed.

func NewStatementExecution added in v0.3.0

func NewStatementExecution(client *client.DatabricksClient) *StatementExecutionAPI

func (*StatementExecutionAPI) CancelExecution added in v0.3.0

func (a *StatementExecutionAPI) CancelExecution(ctx context.Context, request CancelExecutionRequest) error

Cancel statement execution.

Requests that an executing statement be canceled. Callers must poll for status to see the terminal state.

func (*StatementExecutionAPI) ExecuteAndWait added in v0.10.0

[EXPERIMENTAL] Execute a query and wait for results to be available

func (*StatementExecutionAPI) ExecuteStatement added in v0.3.0

Execute a SQL statement.

Execute a SQL statement, and if flagged as such, await its result for a specified time.

func (*StatementExecutionAPI) GetStatement added in v0.3.0

Get status, manifest, and result first chunk.

This request can be used to poll for the statement's status. When the `status.state` field is `SUCCEEDED` it will also return the result manifest and the first chunk of the result data. When the statement is in the terminal states `CANCELED`, `CLOSED` or `FAILED`, it returns HTTP 200 with the state set. After at least 12 hours in terminal state, the statement is removed from the warehouse and further calls will receive an HTTP 404 response.

**NOTE** This call currently may take up to 5 seconds to get the latest status and result.

func (*StatementExecutionAPI) GetStatementByStatementId added in v0.3.0

func (a *StatementExecutionAPI) GetStatementByStatementId(ctx context.Context, statementId string) (*GetStatementResponse, error)

Get status, manifest, and result first chunk.

This request can be used to poll for the statement's status. When the `status.state` field is `SUCCEEDED` it will also return the result manifest and the first chunk of the result data. When the statement is in the terminal states `CANCELED`, `CLOSED` or `FAILED`, it returns HTTP 200 with the state set. After at least 12 hours in terminal state, the statement is removed from the warehouse and further calls will receive an HTTP 404 response.

**NOTE** This call currently may take up to 5 seconds to get the latest status and result.

func (*StatementExecutionAPI) GetStatementResultChunkN added in v0.3.0

func (a *StatementExecutionAPI) GetStatementResultChunkN(ctx context.Context, request GetStatementResultChunkNRequest) (*ResultData, error)

Get result chunk by index.

After the statement execution has `SUCCEEDED`, the result data can be fetched by chunks. Whereas the first chunk with `chunk_index=0` is typically fetched through a `get status` request, subsequent chunks can be fetched using a `get result` request. The response structure is identical to the nested `result` element described in the `get status` request, and similarly includes the `next_chunk_index` and `next_chunk_internal_link` fields for simple iteration through the result set.

func (*StatementExecutionAPI) GetStatementResultChunkNByStatementIdAndChunkIndex added in v0.3.0

func (a *StatementExecutionAPI) GetStatementResultChunkNByStatementIdAndChunkIndex(ctx context.Context, statementId string, chunkIndex int) (*ResultData, error)

Get result chunk by index.

After the statement execution has `SUCCEEDED`, the result data can be fetched by chunks. Whereas the first chunk with `chunk_index=0` is typically fetched through a `get status` request, subsequent chunks can be fetched using a `get result` request. The response structure is identical to the nested `result` element described in the `get status` request, and similarly includes the `next_chunk_index` and `next_chunk_internal_link` fields for simple iteration through the result set.

func (*StatementExecutionAPI) Impl added in v0.3.0

Impl returns low-level StatementExecution API implementation

func (*StatementExecutionAPI) WithImpl added in v0.3.0

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks.

type StatementExecutionService added in v0.3.0

// StatementExecutionService is the low-level interface for the SQL
// Statement Execution API: executing statements, polling their status,
// and fetching result data in chunks.
type StatementExecutionService interface {

	// Cancel statement execution.
	//
	// Requests that an executing statement be canceled. Callers must poll for
	// status to see the terminal state.
	CancelExecution(ctx context.Context, request CancelExecutionRequest) error

	// Execute a SQL statement.
	//
	// Execute a SQL statement, and if flagged as such, await its result for a
	// specified time.
	ExecuteStatement(ctx context.Context, request ExecuteStatementRequest) (*ExecuteStatementResponse, error)

	// Get status, manifest, and result first chunk.
	//
	// This request can be used to poll for the statement's status. When the
	// `status.state` field is `SUCCEEDED` it will also return the result
	// manifest and the first chunk of the result data. When the statement is in
	// the terminal states `CANCELED`, `CLOSED` or `FAILED`, it returns HTTP 200
	// with the state set. After at least 12 hours in terminal state, the
	// statement is removed from the warehouse and further calls will receive an
	// HTTP 404 response.
	//
	// **NOTE** This call currently may take up to 5 seconds to get the latest
	// status and result.
	GetStatement(ctx context.Context, request GetStatementRequest) (*GetStatementResponse, error)

	// Get result chunk by index.
	//
	// After the statement execution has `SUCCEEDED`, the result data can be
	// fetched by chunks. Whereas the first chunk with `chunk_index=0` is
	// typically fetched through a `get status` request, subsequent chunks can
	// be fetched using a `get result` request. The response structure is
	// identical to the nested `result` element described in the `get status`
	// request, and similarly includes the `next_chunk_index` and
	// `next_chunk_internal_link` fields for simple iteration through the result
	// set.
	GetStatementResultChunkN(ctx context.Context, request GetStatementResultChunkNRequest) (*ResultData, error)
}

The SQL Statement Execution API manages the execution of arbitrary SQL statements and the fetching of result data.

**Release status**

This feature is in Public Preview.

**Getting started**

We suggest beginning with the SQL Statement Execution API tutorial.

**Overview of statement execution and result fetching**

Statement execution begins by issuing a :method:statementexecution/executeStatement request with a valid SQL statement and warehouse ID, along with optional parameters such as the data catalog and output format.

When submitting the statement, the call can behave synchronously or asynchronously, based on the `wait_timeout` setting. When set between 5-50 seconds (default: 10) the call behaves synchronously and waits for results up to the specified timeout; when set to `0s`, the call is asynchronous and responds immediately with a statement ID that can be used to poll for status or fetch the results in a separate call.

**Call mode: synchronous**

In synchronous mode, when statement execution completes within the `wait timeout`, the result data is returned directly in the response. This response will contain `statement_id`, `status`, `manifest`, and `result` fields. The `status` field confirms success whereas the `manifest` field contains the result data column schema and metadata about the result set. The `result` field contains the first chunk of result data according to the specified `disposition`, and links to fetch any remaining chunks.

If the execution does not complete before `wait_timeout`, the setting `on_wait_timeout` determines how the system responds.

By default, `on_wait_timeout=CONTINUE`, and after reaching `wait_timeout`, a response is returned and statement execution continues asynchronously. The response will contain only `statement_id` and `status` fields, and the caller must now follow the flow described for asynchronous call mode to poll and fetch the result.

Alternatively, `on_wait_timeout` can also be set to `CANCEL`; in this case if the timeout is reached before execution completes, the underlying statement execution is canceled, and a `CANCELED` status is returned in the response.

**Call mode: asynchronous**

In asynchronous mode, or after a timed-out synchronous request continues, a `statement_id` and `status` will be returned. In this case polling :method:statementexecution/getStatement calls are required to fetch the result and metadata.

Next, a caller must poll until execution completes (`SUCCEEDED`, `FAILED`, etc.) by issuing :method:statementexecution/getStatement requests for the given `statement_id`.

When execution has succeeded, the response will contain `status`, `manifest`, and `result` fields. These fields and the structure are identical to those in the response to a successful synchronous submission. The `result` field will contain the first chunk of result data, either `INLINE` or as `EXTERNAL_LINKS` depending on `disposition`. Additional chunks of result data can be fetched by checking for the presence of the `next_chunk_internal_link` field, and iteratively `GET` those paths until that field is unset: `GET https://$DATABRICKS_HOST/{next_chunk_internal_link}`.

**Fetching result data: format and disposition**

To specify the result data format, set the `format` field to `JSON_ARRAY` (JSON), `ARROW_STREAM` (Apache Arrow Columnar), or `CSV`.

You can also configure how to fetch the result data in two different modes by setting the `disposition` field to `INLINE` or `EXTERNAL_LINKS`.

The `INLINE` disposition can only be used with the `JSON_ARRAY` format and allows results up to 16 MiB. When a statement executed with `INLINE` disposition exceeds this limit, the execution is aborted, and no result can be fetched.

The `EXTERNAL_LINKS` disposition allows fetching large result sets in `JSON_ARRAY`, `ARROW_STREAM` and `CSV` formats, and with higher throughput.

The API uses defaults of `format=JSON_ARRAY` and `disposition=INLINE`. Databricks recommends that you explicitly set the format and the disposition for all production use cases.

**Statement response: statement_id, status, manifest, and result**

The base call :method:statementexecution/getStatement returns a single response combining `statement_id`, `status`, a result `manifest`, and a `result` data chunk or link, depending on the `disposition`. The `manifest` contains the result schema definition and the result summary metadata. When using `disposition=EXTERNAL_LINKS`, it also contains a full listing of all chunks and their summary metadata.

**Use case: small result sets with INLINE + JSON_ARRAY**

For flows that generate small and predictable result sets (<= 16 MiB), `INLINE` downloads of `JSON_ARRAY` result data are typically the simplest way to execute and fetch result data.

When the result set with `disposition=INLINE` is larger, the result can be transferred in chunks. After receiving the initial chunk with :method:statementexecution/executeStatement or :method:statementexecution/getStatement, subsequent calls are required to iteratively fetch each chunk. Each result response contains a link to the next chunk, when there are additional chunks to fetch; it can be found in the field `.next_chunk_internal_link`. This link is an absolute `path` to be joined with your `$DATABRICKS_HOST`, and of the form `/api/2.0/sql/statements/{statement_id}/result/chunks/{chunk_index}`. The next chunk can be fetched by issuing a :method:statementexecution/getStatementResultChunkN request.

When using this mode, each chunk may be fetched once, and in order. A chunk without a field `next_chunk_internal_link` indicates the last chunk was reached and all chunks have been fetched from the result set.

**Use case: large result sets with EXTERNAL_LINKS + ARROW_STREAM**

Using `EXTERNAL_LINKS` to fetch result data in Arrow format allows you to fetch large result sets efficiently. The primary difference from using `INLINE` disposition is that fetched result chunks contain resolved `external_links` URLs, which can be fetched with standard HTTP.

**Presigned URLs**

External links point to data stored within your workspace's internal DBFS, in the form of a presigned URL. The URLs are valid for only a short period, <= 15 minutes. Alongside each `external_link` is an expiration field indicating the time at which the URL is no longer valid. In `EXTERNAL_LINKS` mode, chunks can be resolved and fetched multiple times and in parallel.

----

### **Warning: We recommend you protect the URLs in the EXTERNAL_LINKS.**

When using the EXTERNAL_LINKS disposition, a short-lived pre-signed URL is generated, which the client can use to download the result chunk directly from cloud storage. As the short-lived credential is embedded in a pre-signed URL, this URL should be protected.

Since pre-signed URLs are generated with embedded temporary credentials, you need to remove the authorization header from the fetch requests.

----

Similar to `INLINE` mode, callers can iterate through the result set, by using the `next_chunk_internal_link` field. Each internal link response will contain an external link to the raw chunk data, and additionally contain the `next_chunk_internal_link` if there are more chunks.

Unlike `INLINE` mode, when using `EXTERNAL_LINKS`, chunks may be fetched out of order, and in parallel to achieve higher throughput.

**Limits and limitations**

Note: All byte limits are calculated based on internal storage metrics and will not match byte counts of actual payloads.

- Statements with `disposition=INLINE` are limited to 16 MiB and will abort when this limit is exceeded. - Statements with `disposition=EXTERNAL_LINKS` are limited to 100 GiB. - The maximum query text size is 16 MiB. - Cancellation may silently fail. A successful response from a cancel request indicates that the cancel request was successfully received and sent to the processing engine. However, for example, an outstanding statement may complete execution during signal delivery, with the cancel signal arriving too late to be meaningful. Polling for status until a terminal state is reached is a reliable way to determine the final state. - Wait timeouts are approximate, occur server-side, and cannot account for caller delays, network latency from caller to service, and so on. - After a statement has been submitted and a statement_id is returned, that statement's status and result will automatically close after either of two conditions: - The last result chunk is fetched (or resolved to an external link). - One hour passes with no calls to get the status or fetch the result. Best practice: in asynchronous clients, poll for status regularly (and with backoff) to keep the statement open and alive. - After fetching the last result chunk (including chunk_index=0) the statement is automatically closed.

type StatementState added in v0.3.0

type StatementState string

Statement execution state: - `PENDING`: waiting for warehouse - `RUNNING`: running - `SUCCEEDED`: execution was successful, result data available for fetch - `FAILED`: execution failed; reason for failure described in accompanying error message - `CANCELED`: user canceled; can come from explicit cancel call, or timeout with `on_wait_timeout=CANCEL` - `CLOSED`: execution successful, and statement closed; result no longer available for fetch

const StatementStateCanceled StatementState = `CANCELED`
const StatementStateClosed StatementState = `CLOSED`
const StatementStateFailed StatementState = `FAILED`
const StatementStatePending StatementState = `PENDING`
const StatementStateRunning StatementState = `RUNNING`
const StatementStateSucceeded StatementState = `SUCCEEDED`

func (*StatementState) Set added in v0.3.0

func (f *StatementState) Set(v string) error

Set raw string value and validate it against allowed values

func (*StatementState) String added in v0.3.0

func (f *StatementState) String() string

String representation for fmt.Print

func (*StatementState) Type added in v0.3.0

func (f *StatementState) Type() string

Type always returns StatementState to satisfy [pflag.Value] interface

type StatementStatus added in v0.3.0

// StatementStatus is a status response: the statement's execution state
// and, if relevant, error information.
type StatementStatus struct {
	// Error details, populated when the statement did not succeed.
	Error *ServiceError `json:"error,omitempty"`
	// Statement execution state: - `PENDING`: waiting for warehouse -
	// `RUNNING`: running - `SUCCEEDED`: execution was successful, result data
	// available for fetch - `FAILED`: execution failed; reason for failure
	// described in accompanying error message - `CANCELED`: user canceled; can
	// come from explicit cancel call, or timeout with `on_wait_timeout=CANCEL`
	// - `CLOSED`: execution successful, and statement closed; result no longer
	// available for fetch
	State StatementState `json:"state,omitempty"`
}

Status response includes execution state and if relevant, error information.

type Status

type Status string

Health status of the warehouse.

const StatusDegraded Status = `DEGRADED`
const StatusFailed Status = `FAILED`
const StatusHealthy Status = `HEALTHY`
const StatusStatusUnspecified Status = `STATUS_UNSPECIFIED`

func (*Status) Set

func (f *Status) Set(v string) error

Set raw string value and validate it against allowed values

func (*Status) String

func (f *Status) String() string

String representation for fmt.Print

func (*Status) Type

func (f *Status) Type() string

Type always returns Status to satisfy [pflag.Value] interface

type StopRequest

// StopRequest identifies the SQL warehouse to stop.
type StopRequest struct {
	// Required. Id of the SQL warehouse.
	Id string `json:"-" url:"-"`
}

Stop a warehouse

type Success

// Success wraps a success message returned by the API.
type Success struct {
	// Message holds the returned success message, if any.
	Message SuccessMessage `json:"message,omitempty"`
}

type SuccessMessage

type SuccessMessage string
const SuccessMessageSuccess SuccessMessage = `Success`

func (*SuccessMessage) Set

func (f *SuccessMessage) Set(v string) error

Set raw string value and validate it against allowed values

func (*SuccessMessage) String

func (f *SuccessMessage) String() string

String representation for fmt.Print

func (*SuccessMessage) Type

func (f *SuccessMessage) Type() string

Type always returns SuccessMessage to satisfy [pflag.Value] interface

type TerminationReason

// TerminationReason describes why a cluster was terminated: a status
// code, its type, and optional supporting parameters.
type TerminationReason struct {
	// status code indicating why the cluster was terminated
	Code TerminationReasonCode `json:"code,omitempty"`
	// list of parameters that provide additional information about why the
	// cluster was terminated
	Parameters map[string]string `json:"parameters,omitempty"`
	// type of the termination
	Type TerminationReasonType `json:"type,omitempty"`
}

type TerminationReasonCode

type TerminationReasonCode string

status code indicating why the cluster was terminated

const TerminationReasonCodeAbuseDetected TerminationReasonCode = `ABUSE_DETECTED`
const TerminationReasonCodeAttachProjectFailure TerminationReasonCode = `ATTACH_PROJECT_FAILURE`
const TerminationReasonCodeAwsAuthorizationFailure TerminationReasonCode = `AWS_AUTHORIZATION_FAILURE`
const TerminationReasonCodeAwsInsufficientFreeAddressesInSubnetFailure TerminationReasonCode = `AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE`
const TerminationReasonCodeAwsInsufficientInstanceCapacityFailure TerminationReasonCode = `AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE`
const TerminationReasonCodeAwsMaxSpotInstanceCountExceededFailure TerminationReasonCode = `AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE`
const TerminationReasonCodeAwsRequestLimitExceeded TerminationReasonCode = `AWS_REQUEST_LIMIT_EXCEEDED`
const TerminationReasonCodeAwsUnsupportedFailure TerminationReasonCode = `AWS_UNSUPPORTED_FAILURE`
const TerminationReasonCodeAzureByokKeyPermissionFailure TerminationReasonCode = `AZURE_BYOK_KEY_PERMISSION_FAILURE`
const TerminationReasonCodeAzureEphemeralDiskFailure TerminationReasonCode = `AZURE_EPHEMERAL_DISK_FAILURE`
const TerminationReasonCodeAzureInvalidDeploymentTemplate TerminationReasonCode = `AZURE_INVALID_DEPLOYMENT_TEMPLATE`
const TerminationReasonCodeAzureOperationNotAllowedException TerminationReasonCode = `AZURE_OPERATION_NOT_ALLOWED_EXCEPTION`
const TerminationReasonCodeAzureQuotaExceededException TerminationReasonCode = `AZURE_QUOTA_EXCEEDED_EXCEPTION`
const TerminationReasonCodeAzureResourceManagerThrottling TerminationReasonCode = `AZURE_RESOURCE_MANAGER_THROTTLING`
const TerminationReasonCodeAzureResourceProviderThrottling TerminationReasonCode = `AZURE_RESOURCE_PROVIDER_THROTTLING`
const TerminationReasonCodeAzureUnexpectedDeploymentTemplateFailure TerminationReasonCode = `AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE`
const TerminationReasonCodeAzureVmExtensionFailure TerminationReasonCode = `AZURE_VM_EXTENSION_FAILURE`
const TerminationReasonCodeAzureVnetConfigurationFailure TerminationReasonCode = `AZURE_VNET_CONFIGURATION_FAILURE`
const TerminationReasonCodeBootstrapTimeout TerminationReasonCode = `BOOTSTRAP_TIMEOUT`
const TerminationReasonCodeBootstrapTimeoutCloudProviderException TerminationReasonCode = `BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION`
const TerminationReasonCodeCloudProviderDiskSetupFailure TerminationReasonCode = `CLOUD_PROVIDER_DISK_SETUP_FAILURE`
const TerminationReasonCodeCloudProviderLaunchFailure TerminationReasonCode = `CLOUD_PROVIDER_LAUNCH_FAILURE`
const TerminationReasonCodeCloudProviderResourceStockout TerminationReasonCode = `CLOUD_PROVIDER_RESOURCE_STOCKOUT`
const TerminationReasonCodeCloudProviderShutdown TerminationReasonCode = `CLOUD_PROVIDER_SHUTDOWN`
const TerminationReasonCodeCommunicationLost TerminationReasonCode = `COMMUNICATION_LOST`
const TerminationReasonCodeContainerLaunchFailure TerminationReasonCode = `CONTAINER_LAUNCH_FAILURE`
const TerminationReasonCodeControlPlaneRequestFailure TerminationReasonCode = `CONTROL_PLANE_REQUEST_FAILURE`
const TerminationReasonCodeDatabaseConnectionFailure TerminationReasonCode = `DATABASE_CONNECTION_FAILURE`
const TerminationReasonCodeDbfsComponentUnhealthy TerminationReasonCode = `DBFS_COMPONENT_UNHEALTHY`
const TerminationReasonCodeDockerImagePullFailure TerminationReasonCode = `DOCKER_IMAGE_PULL_FAILURE`
const TerminationReasonCodeDriverUnreachable TerminationReasonCode = `DRIVER_UNREACHABLE`
const TerminationReasonCodeDriverUnresponsive TerminationReasonCode = `DRIVER_UNRESPONSIVE`
const TerminationReasonCodeExecutionComponentUnhealthy TerminationReasonCode = `EXECUTION_COMPONENT_UNHEALTHY`
const TerminationReasonCodeGcpQuotaExceeded TerminationReasonCode = `GCP_QUOTA_EXCEEDED`
const TerminationReasonCodeGcpServiceAccountDeleted TerminationReasonCode = `GCP_SERVICE_ACCOUNT_DELETED`
const TerminationReasonCodeGlobalInitScriptFailure TerminationReasonCode = `GLOBAL_INIT_SCRIPT_FAILURE`
const TerminationReasonCodeHiveMetastoreProvisioningFailure TerminationReasonCode = `HIVE_METASTORE_PROVISIONING_FAILURE`
const TerminationReasonCodeImagePullPermissionDenied TerminationReasonCode = `IMAGE_PULL_PERMISSION_DENIED`
const TerminationReasonCodeInactivity TerminationReasonCode = `INACTIVITY`
const TerminationReasonCodeInitScriptFailure TerminationReasonCode = `INIT_SCRIPT_FAILURE`
const TerminationReasonCodeInstancePoolClusterFailure TerminationReasonCode = `INSTANCE_POOL_CLUSTER_FAILURE`
const TerminationReasonCodeInstanceUnreachable TerminationReasonCode = `INSTANCE_UNREACHABLE`
const TerminationReasonCodeInternalError TerminationReasonCode = `INTERNAL_ERROR`
const TerminationReasonCodeInvalidArgument TerminationReasonCode = `INVALID_ARGUMENT`
const TerminationReasonCodeInvalidSparkImage TerminationReasonCode = `INVALID_SPARK_IMAGE`
const TerminationReasonCodeIpExhaustionFailure TerminationReasonCode = `IP_EXHAUSTION_FAILURE`
const TerminationReasonCodeJobFinished TerminationReasonCode = `JOB_FINISHED`
const TerminationReasonCodeK8sAutoscalingFailure TerminationReasonCode = `K8S_AUTOSCALING_FAILURE`
const TerminationReasonCodeK8sDbrClusterLaunchTimeout TerminationReasonCode = `K8S_DBR_CLUSTER_LAUNCH_TIMEOUT`
const TerminationReasonCodeMetastoreComponentUnhealthy TerminationReasonCode = `METASTORE_COMPONENT_UNHEALTHY`
const TerminationReasonCodeNephosResourceManagement TerminationReasonCode = `NEPHOS_RESOURCE_MANAGEMENT`
const TerminationReasonCodeNetworkConfigurationFailure TerminationReasonCode = `NETWORK_CONFIGURATION_FAILURE`
const TerminationReasonCodeNfsMountFailure TerminationReasonCode = `NFS_MOUNT_FAILURE`
const TerminationReasonCodeNpipTunnelSetupFailure TerminationReasonCode = `NPIP_TUNNEL_SETUP_FAILURE`
const TerminationReasonCodeNpipTunnelTokenFailure TerminationReasonCode = `NPIP_TUNNEL_TOKEN_FAILURE`
const TerminationReasonCodeRequestRejected TerminationReasonCode = `REQUEST_REJECTED`
const TerminationReasonCodeRequestThrottled TerminationReasonCode = `REQUEST_THROTTLED`
const TerminationReasonCodeSecretResolutionError TerminationReasonCode = `SECRET_RESOLUTION_ERROR`
const TerminationReasonCodeSecurityDaemonRegistrationException TerminationReasonCode = `SECURITY_DAEMON_REGISTRATION_EXCEPTION`
const TerminationReasonCodeSelfBootstrapFailure TerminationReasonCode = `SELF_BOOTSTRAP_FAILURE`
const TerminationReasonCodeSkippedSlowNodes TerminationReasonCode = `SKIPPED_SLOW_NODES`
const TerminationReasonCodeSlowImageDownload TerminationReasonCode = `SLOW_IMAGE_DOWNLOAD`
const TerminationReasonCodeSparkError TerminationReasonCode = `SPARK_ERROR`
const TerminationReasonCodeSparkImageDownloadFailure TerminationReasonCode = `SPARK_IMAGE_DOWNLOAD_FAILURE`
const TerminationReasonCodeSparkStartupFailure TerminationReasonCode = `SPARK_STARTUP_FAILURE`
const TerminationReasonCodeSpotInstanceTermination TerminationReasonCode = `SPOT_INSTANCE_TERMINATION`
const TerminationReasonCodeStorageDownloadFailure TerminationReasonCode = `STORAGE_DOWNLOAD_FAILURE`
const TerminationReasonCodeStsClientSetupFailure TerminationReasonCode = `STS_CLIENT_SETUP_FAILURE`
const TerminationReasonCodeSubnetExhaustedFailure TerminationReasonCode = `SUBNET_EXHAUSTED_FAILURE`
const TerminationReasonCodeTemporarilyUnavailable TerminationReasonCode = `TEMPORARILY_UNAVAILABLE`
const TerminationReasonCodeTrialExpired TerminationReasonCode = `TRIAL_EXPIRED`
const TerminationReasonCodeUnexpectedLaunchFailure TerminationReasonCode = `UNEXPECTED_LAUNCH_FAILURE`
const TerminationReasonCodeUnknown TerminationReasonCode = `UNKNOWN`
const TerminationReasonCodeUnsupportedInstanceType TerminationReasonCode = `UNSUPPORTED_INSTANCE_TYPE`
const TerminationReasonCodeUpdateInstanceProfileFailure TerminationReasonCode = `UPDATE_INSTANCE_PROFILE_FAILURE`
const TerminationReasonCodeUserRequest TerminationReasonCode = `USER_REQUEST`
const TerminationReasonCodeWorkerSetupFailure TerminationReasonCode = `WORKER_SETUP_FAILURE`
const TerminationReasonCodeWorkspaceCancelledError TerminationReasonCode = `WORKSPACE_CANCELLED_ERROR`
const TerminationReasonCodeWorkspaceConfigurationError TerminationReasonCode = `WORKSPACE_CONFIGURATION_ERROR`

func (*TerminationReasonCode) Set

Set raw string value and validate it against allowed values

func (*TerminationReasonCode) String

func (f *TerminationReasonCode) String() string

String representation for fmt.Print

func (*TerminationReasonCode) Type

func (f *TerminationReasonCode) Type() string

Type always returns TerminationReasonCode to satisfy [pflag.Value] interface

type TerminationReasonType

type TerminationReasonType string

type of the termination

const TerminationReasonTypeClientError TerminationReasonType = `CLIENT_ERROR`
const TerminationReasonTypeCloudFailure TerminationReasonType = `CLOUD_FAILURE`
const TerminationReasonTypeServiceFault TerminationReasonType = `SERVICE_FAULT`
const TerminationReasonTypeSuccess TerminationReasonType = `SUCCESS`

func (*TerminationReasonType) Set

Set raw string value and validate it against allowed values

func (*TerminationReasonType) String

func (f *TerminationReasonType) String() string

String representation for fmt.Print

func (*TerminationReasonType) Type

func (f *TerminationReasonType) Type() string

Type always returns TerminationReasonType to satisfy [pflag.Value] interface

type TimeRange

// TimeRange limits query results to those that started within a window,
// expressed in milliseconds (per the field names; epoch reference
// presumed — confirm against the Query History API docs).
type TimeRange struct {
	// Limit results to queries that started before this time.
	EndTimeMs int `json:"end_time_ms,omitempty"`
	// Limit results to queries that started after this time.
	StartTimeMs int `json:"start_time_ms,omitempty"`
}

type TimeoutAction added in v0.3.0

type TimeoutAction string

When in synchronous mode with `wait_timeout > 0s` it determines the action taken when the timeout is reached:

`CONTINUE` → the statement execution continues asynchronously and the call returns a statement ID immediately.

`CANCEL` → the statement execution is canceled and the call returns immediately with a `CANCELED` state.

const TimeoutActionCancel TimeoutAction = `CANCEL`
const TimeoutActionContinue TimeoutAction = `CONTINUE`

func (*TimeoutAction) Set added in v0.3.0

func (f *TimeoutAction) Set(v string) error

Set raw string value and validate it against allowed values

func (*TimeoutAction) String added in v0.3.0

func (f *TimeoutAction) String() string

String representation for fmt.Print

func (*TimeoutAction) Type added in v0.3.0

func (f *TimeoutAction) Type() string

Type always returns TimeoutAction to satisfy [pflag.Value] interface

type TransferOwnershipObjectId

// TransferOwnershipObjectId names the new owner in a transfer-ownership
// request.
type TransferOwnershipObjectId struct {
	// Email address for the new owner, who must exist in the workspace.
	NewOwner string `json:"new_owner,omitempty"`
}

type TransferOwnershipRequest

// TransferOwnershipRequest transfers ownership of an object (identified
// by its type and ID) to a new owner.
type TransferOwnershipRequest struct {
	// Email address for the new owner, who must exist in the workspace.
	NewOwner string `json:"new_owner,omitempty"`
	// The ID of the object on which to change ownership.
	ObjectId TransferOwnershipObjectId `json:"-" url:"-"`
	// The type of object on which to change ownership.
	ObjectType OwnableObjectType `json:"-" url:"-"`
}

Transfer object ownership

type User

// User identifies a workspace user.
type User struct {
	// Email address of the user.
	Email string `json:"email,omitempty"`
	// Numeric ID of the user.
	Id int `json:"id,omitempty"`
	// Name of the user.
	Name string `json:"name,omitempty"`
}

type Visualization

// Visualization describes a query visualization (chart, table, pivot
// table, and so on). The visualization description API changes
// frequently and is unsupported.
type Visualization struct {
	// Timestamp when the visualization was created.
	CreatedAt string `json:"created_at,omitempty"`
	// A short description of this visualization. This is not displayed in the
	// UI.
	Description string `json:"description,omitempty"`
	// The UUID for this visualization.
	Id string `json:"id,omitempty"`
	// The name of the visualization that appears on dashboards and the query
	// screen.
	Name string `json:"name,omitempty"`
	// The options object varies widely from one visualization type to the next
	// and is unsupported. Databricks does not recommend modifying visualization
	// settings in JSON.
	Options any `json:"options,omitempty"`
	// The type of visualization: chart, table, pivot table, and so on.
	Type string `json:"type,omitempty"`
	// Timestamp when the visualization was last updated.
	UpdatedAt string `json:"updated_at,omitempty"`
}

The visualization description API changes frequently and is unsupported. You can duplicate a visualization by copying description objects received _from the API_ and then using them to create a new one with a POST request to the same endpoint. Databricks does not recommend constructing ad-hoc visualizations entirely in JSON.

type WaitGetWarehouseRunning added in v0.10.0

// WaitGetWarehouseRunning is a wrapper that calls
// WarehousesAPI.WaitGetWarehouseRunning and waits for the warehouse with
// the given Id to reach the RUNNING state.
type WaitGetWarehouseRunning[R any] struct {
	Response *R
	Id       string `json:"id"`
	// contains filtered or unexported fields
}

WaitGetWarehouseRunning is a wrapper that calls WarehousesAPI.WaitGetWarehouseRunning and waits to reach RUNNING state.

func (*WaitGetWarehouseRunning[R]) Get added in v0.10.0

Get the GetWarehouseResponse with the default timeout of 20 minutes.

func (*WaitGetWarehouseRunning[R]) GetWithTimeout added in v0.10.0

func (w *WaitGetWarehouseRunning[R]) GetWithTimeout(timeout time.Duration) (*GetWarehouseResponse, error)

Get the GetWarehouseResponse with custom timeout.

func (*WaitGetWarehouseRunning[R]) OnProgress added in v0.10.0

func (w *WaitGetWarehouseRunning[R]) OnProgress(callback func(*GetWarehouseResponse)) *WaitGetWarehouseRunning[R]

OnProgress invokes a callback every time it polls for the status update.

type WaitGetWarehouseStopped added in v0.10.0

// WaitGetWarehouseStopped is a wrapper that calls
// WarehousesAPI.WaitGetWarehouseStopped and waits for the warehouse with
// the given Id to reach the STOPPED state.
type WaitGetWarehouseStopped[R any] struct {
	Response *R
	Id       string `json:"id"`
	// contains filtered or unexported fields
}

WaitGetWarehouseStopped is a wrapper that calls WarehousesAPI.WaitGetWarehouseStopped and waits to reach STOPPED state.

func (*WaitGetWarehouseStopped[R]) Get added in v0.10.0

Get the GetWarehouseResponse with the default timeout of 20 minutes.

func (*WaitGetWarehouseStopped[R]) GetWithTimeout added in v0.10.0

func (w *WaitGetWarehouseStopped[R]) GetWithTimeout(timeout time.Duration) (*GetWarehouseResponse, error)

Get the GetWarehouseResponse with custom timeout.

func (*WaitGetWarehouseStopped[R]) OnProgress added in v0.10.0

func (w *WaitGetWarehouseStopped[R]) OnProgress(callback func(*GetWarehouseResponse)) *WaitGetWarehouseStopped[R]

OnProgress invokes a callback every time it polls for the status update.

type WarehouseTypePair

// WarehouseTypePair enables or disables a specific warehouse type for
// use in CreateWarehouse and EditWarehouse requests.
type WarehouseTypePair struct {
	// If set to false the specific warehouse type will not be allowed as a
	// value for warehouse_type in CreateWarehouse and EditWarehouse
	Enabled bool `json:"enabled,omitempty"`
	// Warehouse type: `PRO` or `CLASSIC`.
	WarehouseType WarehouseTypePairWarehouseType `json:"warehouse_type,omitempty"`
}

type WarehouseTypePairWarehouseType added in v0.9.0

type WarehouseTypePairWarehouseType string

Warehouse type: `PRO` or `CLASSIC`.

const WarehouseTypePairWarehouseTypeClassic WarehouseTypePairWarehouseType = `CLASSIC`
const WarehouseTypePairWarehouseTypePro WarehouseTypePairWarehouseType = `PRO`
const WarehouseTypePairWarehouseTypeTypeUnspecified WarehouseTypePairWarehouseType = `TYPE_UNSPECIFIED`

func (*WarehouseTypePairWarehouseType) Set added in v0.9.0

Set raw string value and validate it against allowed values

func (*WarehouseTypePairWarehouseType) String added in v0.9.0

String representation for fmt.Print

func (*WarehouseTypePairWarehouseType) Type added in v0.9.0

Type always returns WarehouseTypePairWarehouseType to satisfy [pflag.Value] interface

type WarehousesAPI

// WarehousesAPI manages SQL warehouses: compute resources that let you
// run SQL commands on data objects within Databricks SQL.
type WarehousesAPI struct {
	// contains filtered or unexported fields
}

A SQL warehouse is a compute resource that lets you run SQL commands on data objects within Databricks SQL. Compute resources are infrastructure resources that provide processing capabilities in the cloud.

func NewWarehouses

func NewWarehouses(client *client.DatabricksClient) *WarehousesAPI

func (*WarehousesAPI) Create

Create a warehouse.

Creates a new SQL warehouse.

Example (SqlWarehouses)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

created, err := w.Warehouses.CreateAndWait(ctx, sql.CreateWarehouseRequest{
	Name:           fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	ClusterSize:    "2X-Small",
	MaxNumClusters: 1,
	AutoStopMins:   10,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

// cleanup

err = w.Warehouses.DeleteById(ctx, created.Id)
if err != nil {
	panic(err)
}
Output:

func (*WarehousesAPI) CreateAndWait deprecated

func (a *WarehousesAPI) CreateAndWait(ctx context.Context, createWarehouseRequest CreateWarehouseRequest, options ...retries.Option[GetWarehouseResponse]) (*GetWarehouseResponse, error)

Calls WarehousesAPI.Create and waits to reach RUNNING state

You can override the default timeout of 20 minutes by calling adding retries.Timeout[GetWarehouseResponse](60*time.Minute) functional option.

Deprecated: use WarehousesAPI.Create.Get() or WarehousesAPI.WaitGetWarehouseRunning

func (*WarehousesAPI) Delete

func (a *WarehousesAPI) Delete(ctx context.Context, request DeleteWarehouseRequest) error

Delete a warehouse.

Deletes a SQL warehouse.

func (*WarehousesAPI) DeleteById

func (a *WarehousesAPI) DeleteById(ctx context.Context, id string) error

Delete a warehouse.

Deletes a SQL warehouse.

func (*WarehousesAPI) Edit

func (a *WarehousesAPI) Edit(ctx context.Context, editWarehouseRequest EditWarehouseRequest) (*WaitGetWarehouseRunning[any], error)

Update a warehouse.

Updates the configuration for a SQL warehouse.

Example (SqlWarehouses)
// Build a workspace client from the ambient environment/configuration.
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

// Create a warehouse to edit, waiting until it is RUNNING.
created, err := w.Warehouses.CreateAndWait(ctx, sql.CreateWarehouseRequest{
	Name:           fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	ClusterSize:    "2X-Small",
	MaxNumClusters: 1,
	AutoStopMins:   10,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

// Rename the warehouse in place; all other settings are re-stated
// because Edit replaces the full configuration for the given Id.
_, err = w.Warehouses.Edit(ctx, sql.EditWarehouseRequest{
	Id:             created.Id,
	Name:           fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	ClusterSize:    "2X-Small",
	MaxNumClusters: 1,
	AutoStopMins:   10,
})
if err != nil {
	panic(err)
}

// cleanup

// Remove the warehouse created above.
err = w.Warehouses.DeleteById(ctx, created.Id)
if err != nil {
	panic(err)
}
Output:

func (*WarehousesAPI) EditAndWait deprecated

func (a *WarehousesAPI) EditAndWait(ctx context.Context, editWarehouseRequest EditWarehouseRequest, options ...retries.Option[GetWarehouseResponse]) (*GetWarehouseResponse, error)

Calls WarehousesAPI.Edit and waits to reach RUNNING state

You can override the default timeout of 20 minutes by adding the retries.Timeout[GetWarehouseResponse](60*time.Minute) functional option.

Deprecated: use WarehousesAPI.Edit.Get() or WarehousesAPI.WaitGetWarehouseRunning

func (*WarehousesAPI) EndpointInfoNameToIdMap

func (a *WarehousesAPI) EndpointInfoNameToIdMap(ctx context.Context, request ListWarehousesRequest) (map[string]string, error)

EndpointInfoNameToIdMap calls WarehousesAPI.ListAll and creates a map of results with EndpointInfo.Name as key and EndpointInfo.Id as value.

Returns an error if there's more than one EndpointInfo with the same .Name.

Note: All EndpointInfo instances are loaded into memory before creating a map.

This method is generated by Databricks SDK Code Generator.

func (*WarehousesAPI) Get

Get warehouse info.

Gets the information for a single SQL warehouse.

Example (SqlWarehouses)
// Build a workspace client from the ambient environment/configuration.
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

// Create a warehouse to look up, waiting until it is RUNNING.
created, err := w.Warehouses.CreateAndWait(ctx, sql.CreateWarehouseRequest{
	Name:           fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	ClusterSize:    "2X-Small",
	MaxNumClusters: 1,
	AutoStopMins:   10,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

// Fetch the same warehouse back by its Id.
wh, err := w.Warehouses.GetById(ctx, created.Id)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", wh)

// cleanup

// Remove the warehouse created above.
err = w.Warehouses.DeleteById(ctx, created.Id)
if err != nil {
	panic(err)
}
Output:

func (*WarehousesAPI) GetById

Get warehouse info.

Gets the information for a single SQL warehouse.

func (*WarehousesAPI) GetByName

func (a *WarehousesAPI) GetByName(ctx context.Context, name string) (*EndpointInfo, error)

GetByName calls WarehousesAPI.EndpointInfoNameToIdMap and returns a single EndpointInfo.

Returns an error if there's more than one EndpointInfo with the same .Name.

Note: All EndpointInfo instances are loaded into memory before returning matching by name.

This method is generated by Databricks SDK Code Generator.

func (*WarehousesAPI) GetWorkspaceWarehouseConfig

func (a *WarehousesAPI) GetWorkspaceWarehouseConfig(ctx context.Context) (*GetWorkspaceWarehouseConfigResponse, error)

Get the workspace configuration.

Gets the workspace level configuration that is shared by all SQL warehouses in a workspace.

func (*WarehousesAPI) Impl

func (a *WarehousesAPI) Impl() WarehousesService

Impl returns low-level Warehouses API implementation

func (*WarehousesAPI) ListAll

func (a *WarehousesAPI) ListAll(ctx context.Context, request ListWarehousesRequest) ([]EndpointInfo, error)

List warehouses.

Lists all SQL warehouses that a user has manager permissions on.

This method is generated by Databricks SDK Code Generator.

Example (SqlWarehouses)
// Build a workspace client from the ambient environment/configuration.
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

// List every warehouse visible to the caller; an empty request means
// no filters. All pages are loaded into the returned slice.
all, err := w.Warehouses.ListAll(ctx, sql.ListWarehousesRequest{})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", all)
Output:

func (*WarehousesAPI) SetWorkspaceWarehouseConfig

func (a *WarehousesAPI) SetWorkspaceWarehouseConfig(ctx context.Context, request SetWorkspaceWarehouseConfigRequest) error

Set the workspace configuration.

Sets the workspace level configuration that is shared by all SQL warehouses in a workspace.

func (*WarehousesAPI) Start

func (a *WarehousesAPI) Start(ctx context.Context, startRequest StartRequest) (*WaitGetWarehouseRunning[any], error)

Start a warehouse.

Starts a SQL warehouse.

func (*WarehousesAPI) StartAndWait deprecated

func (a *WarehousesAPI) StartAndWait(ctx context.Context, startRequest StartRequest, options ...retries.Option[GetWarehouseResponse]) (*GetWarehouseResponse, error)

Calls WarehousesAPI.Start and waits to reach RUNNING state

You can override the default timeout of 20 minutes by adding the retries.Timeout[GetWarehouseResponse](60*time.Minute) functional option.

Deprecated: use WarehousesAPI.Start.Get() or WarehousesAPI.WaitGetWarehouseRunning

func (*WarehousesAPI) Stop

func (a *WarehousesAPI) Stop(ctx context.Context, stopRequest StopRequest) (*WaitGetWarehouseStopped[any], error)

Stop a warehouse.

Stops a SQL warehouse.

func (*WarehousesAPI) StopAndWait deprecated

func (a *WarehousesAPI) StopAndWait(ctx context.Context, stopRequest StopRequest, options ...retries.Option[GetWarehouseResponse]) (*GetWarehouseResponse, error)

Calls WarehousesAPI.Stop and waits to reach STOPPED state

You can override the default timeout of 20 minutes by adding the retries.Timeout[GetWarehouseResponse](60*time.Minute) functional option.

Deprecated: use WarehousesAPI.Stop.Get() or WarehousesAPI.WaitGetWarehouseStopped

func (*WarehousesAPI) WaitGetWarehouseRunning added in v0.10.0

func (a *WarehousesAPI) WaitGetWarehouseRunning(ctx context.Context, id string,
	timeout time.Duration, callback func(*GetWarehouseResponse)) (*GetWarehouseResponse, error)

WaitGetWarehouseRunning repeatedly calls WarehousesAPI.Get and waits to reach RUNNING state

func (*WarehousesAPI) WaitGetWarehouseStopped added in v0.10.0

func (a *WarehousesAPI) WaitGetWarehouseStopped(ctx context.Context, id string,
	timeout time.Duration, callback func(*GetWarehouseResponse)) (*GetWarehouseResponse, error)

WaitGetWarehouseStopped repeatedly calls WarehousesAPI.Get and waits to reach STOPPED state

func (*WarehousesAPI) WithImpl

func (a *WarehousesAPI) WithImpl(impl WarehousesService) *WarehousesAPI

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks.

type WarehousesService

// WarehousesService is the low-level service interface backing
// WarehousesAPI. It can be overridden via WarehousesAPI.WithImpl to
// substitute a mock implementation in unit tests.
type WarehousesService interface {

	// Create a warehouse.
	//
	// Creates a new SQL warehouse.
	Create(ctx context.Context, request CreateWarehouseRequest) (*CreateWarehouseResponse, error)

	// Delete a warehouse.
	//
	// Deletes a SQL warehouse.
	Delete(ctx context.Context, request DeleteWarehouseRequest) error

	// Update a warehouse.
	//
	// Updates the configuration for a SQL warehouse.
	Edit(ctx context.Context, request EditWarehouseRequest) error

	// Get warehouse info.
	//
	// Gets the information for a single SQL warehouse.
	Get(ctx context.Context, request GetWarehouseRequest) (*GetWarehouseResponse, error)

	// Get the workspace configuration.
	//
	// Gets the workspace level configuration that is shared by all SQL
	// warehouses in a workspace.
	GetWorkspaceWarehouseConfig(ctx context.Context) (*GetWorkspaceWarehouseConfigResponse, error)

	// List warehouses.
	//
	// Lists all SQL warehouses that a user has manager permissions on.
	//
	// Returns a single page; use ListAll() on WarehousesAPI to collect
	// all EndpointInfo instances across pages.
	List(ctx context.Context, request ListWarehousesRequest) (*ListWarehousesResponse, error)

	// Set the workspace configuration.
	//
	// Sets the workspace level configuration that is shared by all SQL
	// warehouses in a workspace.
	SetWorkspaceWarehouseConfig(ctx context.Context, request SetWorkspaceWarehouseConfigRequest) error

	// Start a warehouse.
	//
	// Starts a SQL warehouse.
	Start(ctx context.Context, request StartRequest) error

	// Stop a warehouse.
	//
	// Stops a SQL warehouse.
	Stop(ctx context.Context, request StopRequest) error
}

A SQL warehouse is a compute resource that lets you run SQL commands on data objects within Databricks SQL. Compute resources are infrastructure resources that provide processing capabilities in the cloud.

type Widget

// Widget is a single element placed on a dashboard: either a
// visualization of a query result or, when Visualization is nil and
// Options.Text is set, a plain textbox.
type Widget struct {
	// The unique ID for this widget.
	Id int `json:"id,omitempty"`

	// Display options, including the owning dashboard and position.
	Options *WidgetOptions `json:"options,omitempty"`
	// The visualization description API changes frequently and is unsupported.
	// You can duplicate a visualization by copying description objects received
	// _from the API_ and then using them to create a new one with a POST
	// request to the same endpoint. Databricks does not recommend constructing
	// ad-hoc visualizations entirely in JSON.
	Visualization *Visualization `json:"visualization,omitempty"`
	// Unused field.
	Width int `json:"width,omitempty"`
}

type WidgetOptions

// WidgetOptions carries a widget's metadata: creation/update timestamps,
// the dashboard it belongs to, visibility, parameter wiring, position,
// and optional textbox content.
type WidgetOptions struct {
	// Timestamp when this object was created
	CreatedAt string `json:"created_at,omitempty"`
	// The dashboard ID to which this widget belongs. Each widget can belong to
	// one dashboard.
	DashboardId string `json:"dashboard_id,omitempty"`
	// Whether this widget is hidden on the dashboard.
	// NOTE(review): the wire name is camelCase ("isHidden"), unlike the
	// snake_case tags elsewhere in this struct — presumably it mirrors the
	// upstream API; confirm before "fixing".
	IsHidden bool `json:"isHidden,omitempty"`
	// How parameters used by the visualization in this widget relate to other
	// widgets on the dashboard. Databricks does not recommend modifying this
	// definition in JSON.
	ParameterMappings any `json:"parameterMappings,omitempty"`
	// Coordinates of this widget on a dashboard. This portion of the API
	// changes frequently and is unsupported.
	Position any `json:"position,omitempty"`
	// If this is a textbox widget, the application displays this text. This
	// field is ignored if the widget contains a visualization in the
	// `visualization` field.
	Text string `json:"text,omitempty"`
	// Timestamp of the last time this object was updated.
	UpdatedAt string `json:"updated_at,omitempty"`
}

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL