exasol

package module
v0.0.0-...-08963c5
Published: Apr 4, 2024 License: MIT Imports: 25 Imported by: 0

README

go-exasol-client

A Go library for connecting to Exasol based on the Exasol websocket API.

Also consider using the Official Exasol Golang driver. As of July 2021, the primary differences were:

Feature                                            This Driver   Official Driver
Standard Golang SQL driver interface               No            Yes
Compression                                        No            Yes
Bulk/Streaming up/download of CSV data             Yes           No
Support for alternate/custom websocket libraries   Yes           No

Examples

import (
    "bytes"

    "github.com/grantstreetgroup/go-exasol-client"
)

func main() {
    conf := exasol.ConnConf{
        Host:     "host or ip-range",
        Port:     8563,
        Username: "user",
        Password: "pass",
    }
    conn, err := exasol.Connect(conf)
    defer conn.Disconnect()

    conn.DisableAutoCommit()

    conn.Execute("ALTER SESSION SET...")

    // To specify placeholder values you can pass in a second argument that is either
    // []interface{} or [][]interface{} depending on whether you are inserting one or many rows.
    rowsAffected, err := conn.Execute("INSERT INTO t VALUES(?,?,?)", [][]interface{}{...})

    res, err := conn.FetchSlice("SELECT * FROM t WHERE c = ?", []interface{}{...})
    for _, row := range res {
        col := row[0].(string)
    }

    // For large datasets use FetchChan to avoid buffering
    // the entire resultset in memory
    rows, err := conn.FetchChan("SELECT * FROM t")
    for row := range rows {
        col := row[0].(string)
    }


    // For very large datasets you can send/receive your data
    // in CSV format (stored in a bytes.Buffer) using the Bulk* methods.
    // This is the fastest way to upload or download data to Exasol.
    csvData := new(bytes.Buffer)
    csvData.WriteString("csv,data,...\n...")

    // To upload to a particular table
    err = conn.BulkInsert(schemaName, tableName, csvData)

    // To select all data from a particular table
    err = conn.BulkSelect(schemaName, tableName, csvData)
    SomeCSVParser(csvData.String())

    // To select an arbitrary query
    sql := "EXPORT (SELECT c FROM t) INTO CSV AT '%%s' FILE 'data.csv'"
    err = conn.BulkQuery(sql, csvData)
    SomeCSVParser(csvData.String())


    // For extremely large datasets that cannot fit in memory,
    // you can stream your CSV data to/from any of the above Bulk methods
    // by using the equivalent Stream... method.
    // The stream consists of a chan of byte slices where each byte
    // slice is optimally around 8K in size
    csvChan := make(chan []byte, 1000) // Chan size depends on your memory
    go func() {
        defer close(csvChan)
        for {
            ...
            // Generate your CSV data in ~8K chunks
            csvChan <- []byte("csv,data...\n...")
        }
    }()
    err = conn.StreamInsert(schemaName, tableName, csvChan)


    stream := conn.StreamSelect(schemaName, tableName) // Returns immediately
    // Read your CSV data in ~8K chunks
    for chunk := range stream.Data {
        // chunk is a []byte with partial CSV data
    }


    conn.Commit()
}

Author

Grant Street Group developers@grantstreet.com

This software is Copyright (c) 2019 by Grant Street Group.

This is free software, licensed under:

MIT License

Contributors

Documentation

Index

Constants

const DriverVersion = "2"

const ExasolAPIVersion = 3

Variables

This section is empty.

Functions

func QuoteStr

func QuoteStr(str string) string

func Transpose

func Transpose(matrix [][]interface{}) [][]interface{}
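
Neither helper is documented above, so here is a hedged sketch of plausible usage: Transpose flips row-major bind data into columnar form (one inner slice per column), and QuoteStr presumably escapes a string for safe embedding in a SQL literal. The values and the userInput variable are illustrative only:

    // Row-major binds: two rows of (id, name)
    rowBinds := [][]interface{}{
        {1, "a"},
        {2, "b"},
    }
    // Columnar form: {{1, 2}, {"a", "b"}}
    colBinds := exasol.Transpose(rowBinds)

    // Escape a string value before splicing it into SQL
    // (assumption about QuoteStr's exact escaping behavior)
    sql := "SELECT * FROM t WHERE c = '" + exasol.QuoteStr(userInput) + "'"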

Types

type Attributes

type Attributes struct {
	Autocommit                  bool   `json:"autocommit,omitempty"`
	CompressionEnabled          bool   `json:"compressionEnabled,omitempty"`
	CurrentSchema               string `json:"currentSchema,omitempty"`
	DateFormat                  string `json:"dateFormat,omitempty"`
	DateLanguage                string `json:"dateLanguage,omitempty"`
	DatetimeFormat              string `json:"datetimeFormat,omitempty"`
	DefaultLikeEscapeCharacter  string `json:"defaultLikeEscapeCharacter,omitempty"`
	FeedbackInterval            uint32 `json:"feedbackInterval,omitempty"`
	NumericCharacters           string `json:"numericCharacters,omitempty"`
	OpenTransaction             int    `json:"openTransaction,omitempty"` // Boolean, really (1/0)
	QueryTimeout                uint32 `json:"queryTimeout,omitempty"`
	SnapshotTransactionsEnabled bool   `json:"snapshotTransactionsEnabled,omitempty"`
	TimestampUtcEnabled         bool   `json:"timestampUtcEnabled,omitempty"`
	Timezone                    string `json:"timezone,omitempty"`
	TimeZoneBehavior            string `json:"timeZoneBehavior,omitempty"`
}

This struct needs to be visible outside this package because it is returned by GetSessionAttr

type AuthData

type AuthData struct {
	SessionID             uint64  `json:"sessionId"`
	ProtocolVersion       float64 `json:"protocolVersion"`
	ReleaseVersion        string  `json:"releaseVersion"`
	DatabaseName          string  `json:"databaseName"`
	ProductName           string  `json:"productName"`
	MaxDataMessageSize    uint64  `json:"maxDataMessageSize"`
	MaxIdentifierLength   uint64  `json:"maxIdentifierLength"`
	MaxVarcharLength      uint64  `json:"maxVarcharLength"`
	IdentifierQuoteString string  `json:"identifierQuoteString"`
	TimeZone              string  `json:"timeZone"`
	TimeZoneBehavior      string  `json:"timeZoneBehavior"`
}

type Conn

type Conn struct {
	Conf      ConnConf
	SessionID uint64
	Stats     map[string]int
	Metadata  *AuthData
	// contains filtered or unexported fields
}

func Connect

func Connect(conf ConnConf) (*Conn, error)

func (*Conn) BulkExecute

func (c *Conn) BulkExecute(sql string, data *bytes.Buffer) error

func (*Conn) BulkInsert

func (c *Conn) BulkInsert(schema, table string, data *bytes.Buffer) (err error)

func (*Conn) BulkQuery

func (c *Conn) BulkQuery(sql string, data *bytes.Buffer) error

func (*Conn) BulkSelect

func (c *Conn) BulkSelect(schema, table string, data *bytes.Buffer) (err error)

func (*Conn) Commit

func (c *Conn) Commit() error

func (*Conn) DisableAutoCommit

func (c *Conn) DisableAutoCommit() error

func (*Conn) Disconnect

func (c *Conn) Disconnect()

func (*Conn) EnableAutoCommit

func (c *Conn) EnableAutoCommit() error

func (*Conn) Execute

func (c *Conn) Execute(sql string, args ...interface{}) (rowsAffected int64, err error)

TODO: change the optional args into an ExecConf struct. The optional args are binds, default schema, colDefs, and an isColumnar flag (see the sketch after this list):

  1. The binds are data bindings for statements containing placeholders. You can either specify it as []interface{} if there's only one row or as [][]interface{} if there are multiple rows.
  2. Specifying the default schema allows you to use non-schema-qualified table identifiers in the statement even when you have no schema currently open.
  3. The colDefs option expects a []DataTypes. This is only necessary if you are working around a bug that existed in pre-v6.0.9 of Exasol (https://www.exasol.com/support/browse/EXASOL-2138)
  4. The isColumnar boolean indicates whether the binds specified in the first optional arg are in columnar format (by default they are in row format).
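
A minimal sketch of these optional args in use, assuming the positional order listed above; the table, values, and MY_SCHEMA are hypothetical, and passing nil to skip colDefs is an assumption:

    // Row-format binds plus a default schema
    rowsAffected, err := conn.Execute(
        "INSERT INTO t VALUES (?, ?)",
        [][]interface{}{{1, "a"}, {2, "b"}},
        "MY_SCHEMA",
    )

    // The same data in columnar format: one inner slice per column
    rowsAffected, err = conn.Execute(
        "INSERT INTO t VALUES (?, ?)",
        [][]interface{}{{1, 2}, {"a", "b"}},
        "MY_SCHEMA", nil, true,
    )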

func (*Conn) FetchChan

func (c *Conn) FetchChan(sql string, args ...interface{}) (<-chan []interface{}, error)

Optional args are binds and default schema (see the sketch after this list):

  1. The binds are data bindings for queries containing placeholders. You can specify them as []interface{}.
  2. Specifying the default schema allows you to use non-schema-qualified table identifiers in the statement even when you have no schema currently open.
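
A hedged sketch with both optional args; the query and MY_SCHEMA are illustrative:

    rows, err := conn.FetchChan(
        "SELECT a, b FROM t WHERE c = ?",
        []interface{}{"some-value"},
        "MY_SCHEMA",
    )
    if err != nil {
        // handle the error
    }
    for row := range rows {
        // Each row is an []interface{}; rows arrive without
        // buffering the whole resultset in memory
    }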

func (*Conn) FetchSlice

func (c *Conn) FetchSlice(sql string, args ...interface{}) (res [][]interface{}, err error)

For large datasets, use FetchChan to avoid buffering all the data in memory.

func (*Conn) GetSessionAttr

func (c *Conn) GetSessionAttr() (*Attributes, error)

func (*Conn) Lock

func (c *Conn) Lock()

Acquires a sync.Mutex lock on the handle, allowing use of the handle to be coordinated across multiple goroutines.
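
For instance, one way to serialize use of a shared handle across goroutines (a sketch; the INSERT is illustrative):

    var wg sync.WaitGroup
    for i := 0; i < 4; i++ {
        wg.Add(1)
        go func(i int) {
            defer wg.Done()
            conn.Lock()
            defer conn.Unlock()
            // Only one goroutine at a time touches the handle
            conn.Execute("INSERT INTO t VALUES (?)", []interface{}{i})
        }(i)
    }
    wg.Wait()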

func (*Conn) QuoteIdent

func (c *Conn) QuoteIdent(ident string, args ...interface{}) string

func (*Conn) Rollback

func (c *Conn) Rollback() error

func (*Conn) SetTimeout

func (c *Conn) SetTimeout(timeout uint32) error

func (*Conn) StreamExecute

func (c *Conn) StreamExecute(origSQL string, data <-chan []byte) error

func (*Conn) StreamInsert

func (c *Conn) StreamInsert(schema, table string, data <-chan []byte) (err error)

func (*Conn) StreamQuery

func (c *Conn) StreamQuery(exportSQL string) *Rows

func (*Conn) StreamSelect

func (c *Conn) StreamSelect(schema, table string) *Rows

func (*Conn) Unlock

func (c *Conn) Unlock()

type ConnConf

type ConnConf struct {
	Host           string
	Port           uint16
	Username       string
	Password       string
	ClientName     string
	ClientVersion  string
	ConnectTimeout time.Duration
	QueryTimeout   time.Duration
	TLSConfig      *tls.Config
	SuppressError  bool // Server errors are logged to Error by default
	// TODO try compressionEnabled: true
	Logger         Logger    // Optional for better control over logging
	WSHandler      WSHandler // Optional for intercepting websocket traffic
	CachePrepStmts bool

	Timeout uint32 // Deprecated - Use Query/ConnectTimeout instead
}

type DataType

type DataType struct {
	Type              string `json:"type"`
	Precision         int    `json:"precision"`
	Scale             int    `json:"scale"`
	Size              int    `json:"size"`
	CharacterSet      string `json:"characterSet,omitempty"`
	WithLocalTimeZone bool   `json:"withLocalTimeZone,omitempty"`
	Fraction          int    `json:"fraction,omitempty"`
	SRId              int    `json:"srid,omitempty"`
}

This is visible outside of this package because it is passed in as a connection parameter

type Logger

type Logger interface {
	Debug(...interface{})
	Debugf(string, ...interface{})

	Info(...interface{})
	Infof(string, ...interface{})

	Warning(...interface{})
	Warningf(string, ...interface{})

	Error(...interface{})
	Errorf(string, ...interface{})
}
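
Any implementation of this interface can be passed via ConnConf.Logger. As a hedged example, a thin adapter over the standard library log package (stdLogger is a hypothetical name):

    type stdLogger struct{ l *log.Logger }

    func (s stdLogger) Debug(v ...interface{})              { s.l.Print(append([]interface{}{"DEBUG "}, v...)...) }
    func (s stdLogger) Debugf(f string, v ...interface{})   { s.l.Printf("DEBUG "+f, v...) }
    func (s stdLogger) Info(v ...interface{})               { s.l.Print(append([]interface{}{"INFO "}, v...)...) }
    func (s stdLogger) Infof(f string, v ...interface{})    { s.l.Printf("INFO "+f, v...) }
    func (s stdLogger) Warning(v ...interface{})            { s.l.Print(append([]interface{}{"WARN "}, v...)...) }
    func (s stdLogger) Warningf(f string, v ...interface{}) { s.l.Printf("WARN "+f, v...) }
    func (s stdLogger) Error(v ...interface{})              { s.l.Print(append([]interface{}{"ERROR "}, v...)...) }
    func (s stdLogger) Errorf(f string, v ...interface{})   { s.l.Printf("ERROR "+f, v...) }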

type Proxy

type Proxy struct {
	Host string
	Port uint32
	// contains filtered or unexported fields
}

func NewProxy

func NewProxy(host string, port uint16, bufPool *sync.Pool, log Logger) (*Proxy, error)

func (*Proxy) IsRunning

func (p *Proxy) IsRunning() bool

func (*Proxy) Read

func (p *Proxy) Read(data chan<- []byte, stop <-chan bool) (int64, error)

func (*Proxy) Shutdown

func (p *Proxy) Shutdown()

func (*Proxy) Write

func (p *Proxy) Write(data <-chan []byte) (bytesWritten int64, err error)

type Rows

type Rows struct {
	BytesRead int64
	Data      chan []byte
	Pool      *sync.Pool // Use this to return the []bytes
	Error     error
	// contains filtered or unexported fields
}

func (*Rows) Close

func (r *Rows) Close()
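
A hedged sketch of draining a streaming result; per the struct comment above, finished chunks can be returned via Pool (the exact pooled type is an assumption), and the schema/table names here are hypothetical:

    rows := conn.StreamSelect("MY_SCHEMA", "T")
    for chunk := range rows.Data {
        // ... parse the partial CSV in chunk ...
        rows.Pool.Put(chunk) // hand the buffer back once done with it
    }
    if rows.Error != nil {
        // handle any error raised during streaming
    }
    rows.Close()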

type WSHandler

type WSHandler interface {
	// tls.Config is optional. If specified, SSL should be enabled.
	// time.Duration is the connect timeout (or zero for none).
	Connect(url.URL, *tls.Config, time.Duration) error
	EnableCompression(bool)
	// Write/ReadJSON will be passed structs from api.go
	WriteJSON(interface{}) error
	ReadJSON(interface{}) error
	Close()
}

By default the gorilla/websocket implementation is used; however, you can also specify a custom websocket handler, which you can then use to intercept API traffic. This is handy for:

  1. Using a non-gorilla websocket library
  2. Emulating Exasol for testing purposes
  3. Intercepting and manipulating the traffic (e.g. for buffering, caching, etc.)

See websocket_handler.go for the default implementation. A custom websocket handler must conform to the WSHandler interface shown above.
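
For instance, a hedged sketch of a tracing handler that wraps some underlying WSHandler and logs every JSON message; how you obtain the wrapped handler (e.g. the package's gorilla-based default) is left open, and all names here are hypothetical:

    type tracingHandler struct {
        next exasol.WSHandler // the handler actually doing the I/O
        log  exasol.Logger
    }

    func (t *tracingHandler) Connect(u url.URL, cfg *tls.Config, timeout time.Duration) error {
        t.log.Debugf("connecting to %s", u.String())
        return t.next.Connect(u, cfg, timeout)
    }

    func (t *tracingHandler) EnableCompression(on bool) { t.next.EnableCompression(on) }

    func (t *tracingHandler) WriteJSON(v interface{}) error {
        t.log.Debugf("-> %+v", v)
        return t.next.WriteJSON(v)
    }

    func (t *tracingHandler) ReadJSON(v interface{}) error {
        err := t.next.ReadJSON(v)
        t.log.Debugf("<- %+v", v)
        return err
    }

    func (t *tracingHandler) Close() { t.next.Close() }

It can then be plugged in via the WSHandler field of ConnConf, e.g. WSHandler: &tracingHandler{next: inner, log: myLogger}.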
