tsm1

package
v0.10.0-rc2
Published: Feb 2, 2016 License: MIT Imports: 26 Imported by: 0

Documentation

Index

Constants

const (
	CompactionTempExtension = "tmp"
	TSMFileExtension        = "tsm"
)
const (
	// BlockFloat64 designates a block encodes float64 values
	BlockFloat64 = byte(0)

	// BlockInt64 designates a block encodes int64 values
	BlockInt64 = byte(1)

	// BlockBool designates a block encodes bool values
	BlockBool = byte(2)

	// BlockString designates a block encodes string values
	BlockString = byte(3)
)
const (
	// DefaultSegmentSize of 10MB is the size at which segment files will be rolled over
	DefaultSegmentSize = 10 * 1024 * 1024

	// WALFileExtension is the file extension we expect for WAL segments
	WALFileExtension = "wal"

	WALFilePrefix = "_"
)
const (
	// MagicNumber is written as the first 4 bytes of a data file to
	// identify the file as a tsm1 formatted file
	MagicNumber uint32 = 0x16D116D1

	Version byte = 1
)

Variables

var (
	MaxTime = time.Unix(0, math.MaxInt64)
	MinTime = time.Unix(0, 0)
)
var (
	ErrNoValues  = fmt.Errorf("no values written")
	ErrTSMClosed = fmt.Errorf("tsm file closed")
)
var ErrCacheInvalidCheckpoint = fmt.Errorf("invalid checkpoint")
var ErrCacheMemoryExceeded = fmt.Errorf("cache maximum memory size exceeded")
var ErrWALClosed = fmt.Errorf("WAL closed")

Functions

func BlockCount

func BlockCount(block []byte) int

func BlockType

func BlockType(block []byte) (byte, error)

BlockType returns the type of value encoded in a block or an error if the block type is unknown.

func CountTimestamps

func CountTimestamps(b []byte) int

func NewEngine

func NewEngine(path string, walPath string, opt tsdb.EngineOptions) tsdb.Engine

NewEngine returns a new instance of Engine.
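
A rough sketch of constructing and opening an engine. The import paths assume the pre-rename repository location for this release (github.com/influxdb/influxdb), the tsdb.NewEngineOptions constructor is assumed to provide default options, and the data and WAL directories are hypothetical:

package main

import (
	"log"

	"github.com/influxdb/influxdb/tsdb"
	"github.com/influxdb/influxdb/tsdb/engine/tsm1"
)

func main() {
	// Hypothetical shard data and WAL paths.
	opt := tsdb.NewEngineOptions() // assumed default options constructor
	e := tsm1.NewEngine("/var/lib/influxdb/data/db/rp/1", "/var/lib/influxdb/wal/db/rp/1", opt)

	// NewEngine returns a tsdb.Engine; Open initializes the file store, cache and WAL.
	if err := e.Open(); err != nil {
		log.Fatal(err)
	}
	defer e.Close()
}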

func NewMultiFieldCursor

func NewMultiFieldCursor(fields []string, cursors []tsdb.Cursor, ascending bool) tsdb.Cursor

NewMultiFieldCursor returns an instance of Cursor that joins the results of cursors.

func ParseTSMFileName

func ParseTSMFileName(name string) (int, int, error)

ParseTSMFileName parses the generation and sequence from a TSM file name.

func SeriesFieldKey

func SeriesFieldKey(seriesKey, field string) string

SeriesFieldKey combines a series key and field name into a unique string that can be hashed to a numeric ID.

func ZigZagDecode

func ZigZagDecode(v uint64) int64

ZigZagDecode converts a previously zigzag encoded uint64 back to an int64.

func ZigZagEncode

func ZigZagEncode(x int64) uint64

ZigZagEncode converts an int64 to a uint64 by zig-zagging negative and positive values across even and odd numbers. E.g. [0, -1, 1, -2] becomes [0, 1, 2, 3].
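
A minimal round-trip sketch of the zig-zag mapping (the import path is an assumption for this release):

package main

import (
	"fmt"

	"github.com/influxdb/influxdb/tsdb/engine/tsm1"
)

func main() {
	for _, v := range []int64{0, -1, 1, -2} {
		enc := tsm1.ZigZagEncode(v)   // maps to 0, 1, 2, 3
		dec := tsm1.ZigZagDecode(enc) // back to the original value
		fmt.Printf("%d -> %d -> %d\n", v, enc, dec)
	}
}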

Types

type BlockIterator

type BlockIterator struct {
	// contains filtered or unexported fields
}

BlockIterator allows iterating over each block in a TSM file in order. It provides raw access to the block bytes without decoding them.

func (*BlockIterator) Next

func (b *BlockIterator) Next() bool

func (*BlockIterator) PeekNext

func (b *BlockIterator) PeekNext() string

func (*BlockIterator) Read

func (b *BlockIterator) Read() (string, time.Time, time.Time, []byte, error)

type BoolDecoder

type BoolDecoder interface {
	Next() bool
	Read() bool
	Error() error
}

BoolDecoder decodes a series of bools from an in-memory buffer.

func NewBoolDecoder

func NewBoolDecoder(b []byte) BoolDecoder

NewBoolDecoder returns a new instance of BoolDecoder.

type BoolEncoder

type BoolEncoder interface {
	Write(b bool)
	Bytes() ([]byte, error)
}

BoolEncoder encodes a series of bools to an in-memory buffer.

func NewBoolEncoder

func NewBoolEncoder() BoolEncoder

NewBoolEncoder returns a new instance of BoolEncoder.

type BoolValue

type BoolValue struct {
	// contains filtered or unexported fields
}

func DecodeBoolBlock

func DecodeBoolBlock(block []byte, a []BoolValue) ([]BoolValue, error)

func (*BoolValue) Size

func (b *BoolValue) Size() int

func (*BoolValue) String

func (f *BoolValue) String() string

func (*BoolValue) Time

func (b *BoolValue) Time() time.Time

func (*BoolValue) UnixNano

func (b *BoolValue) UnixNano() int64

func (*BoolValue) Value

func (b *BoolValue) Value() interface{}

type Cache

type Cache struct {
	// contains filtered or unexported fields
}

Cache maintains an in-memory store of Values for a set of keys.

func NewCache

func NewCache(maxSize uint64) *Cache

NewCache returns an instance of a cache which will use a maximum of maxSize bytes of memory.

func (*Cache) ClearSnapshot

func (c *Cache) ClearSnapshot(snapshot *Cache)

ClearSnapshot will remove the snapshot cache from the list of flushing caches and adjust the size

func (*Cache) Deduplicate

func (c *Cache) Deduplicate()

Deduplicate sorts the snapshot before returning it. The compactor and any queries coming in while it writes will need the values sorted

func (*Cache) Delete

func (c *Cache) Delete(keys []string)

Delete will remove the keys from the cache

func (*Cache) Keys

func (c *Cache) Keys() []string

Keys returns a sorted slice of all keys under management by the cache.

func (*Cache) Lock

func (c *Cache) Lock()

func (*Cache) MaxSize

func (c *Cache) MaxSize() uint64

MaxSize returns the maximum number of bytes the cache may consume.

func (*Cache) Size

func (c *Cache) Size() uint64

Size returns the number of point-calculated bytes the cache currently uses.

func (*Cache) Snapshot

func (c *Cache) Snapshot() *Cache

Snapshot will take a snapshot of the current cache, add it to the slice of caches that are being flushed, and reset the current cache with new values

func (*Cache) Store

func (c *Cache) Store() map[string]*entry

Store returns the underlying cache store. This is not goroutine safe! Protect access by using the Lock and Unlock functions on Cache.

func (*Cache) Unlock

func (c *Cache) Unlock()

func (*Cache) Values

func (c *Cache) Values(key string) Values

Values returns a copy of all values, deduped and sorted, for the given key.

func (*Cache) Write

func (c *Cache) Write(key string, values []Value) error

Write writes the set of values for the key to the cache. This function is goroutine-safe. It returns an error if the cache has exceeded its max size.

func (*Cache) WriteMulti

func (c *Cache) WriteMulti(values map[string][]Value) error

WriteMulti writes the map of keys and associated values to the cache. This function is goroutine-safe. It returns an error if the cache has exceeded its max size.
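
A small sketch of writing values into a cache and reading them back, deduped and sorted. The import path, measurement and field names are illustrative assumptions:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/influxdb/influxdb/tsdb/engine/tsm1"
)

func main() {
	c := tsm1.NewCache(64 * 1024 * 1024) // 64MB cap; Write fails once it is exceeded

	key := tsm1.SeriesFieldKey("cpu,host=server01", "value")
	vals := []tsm1.Value{
		tsm1.NewValue(time.Unix(0, 2), 2.5),
		tsm1.NewValue(time.Unix(0, 1), 1.5),
		tsm1.NewValue(time.Unix(0, 2), 3.5), // duplicate timestamp: the later value is kept
	}

	if err := c.Write(key, vals); err != nil {
		log.Fatal(err) // e.g. ErrCacheMemoryExceeded
	}

	// Values returns a copy of the stored values, deduped and sorted by time.
	for _, v := range c.Values(key) {
		fmt.Println(v.UnixNano(), v.Value())
	}
}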

type CacheLoader

type CacheLoader struct {
	Logger *log.Logger
	// contains filtered or unexported fields
}

CacheLoader processes a set of WAL segment files, and loads a cache with the data contained within those files. Processing of the supplied files takes place in the order they exist in the files slice.

func NewCacheLoader

func NewCacheLoader(files []string) *CacheLoader

NewCacheLoader returns a new instance of a CacheLoader.

func (*CacheLoader) Load

func (cl *CacheLoader) Load(cache *Cache) error

Load returns a cache loaded with the data contained within the segment files. If, during reading of a segment file, corruption is encountered, that segment file is truncated up to and including the last valid byte, and processing continues with the next segment file.
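
A sketch of rebuilding a cache from WAL segments on startup. The WAL directory is hypothetical; the glob pattern follows the WALFilePrefix and WALFileExtension constants above:

package main

import (
	"log"
	"path/filepath"

	"github.com/influxdb/influxdb/tsdb/engine/tsm1"
)

func main() {
	// Hypothetical shard WAL directory containing _*.wal segment files.
	segments, err := filepath.Glob(filepath.Join("/var/lib/influxdb/wal/db/rp/1", "_*.wal"))
	if err != nil {
		log.Fatal(err)
	}

	cache := tsm1.NewCache(64 * 1024 * 1024)
	loader := tsm1.NewCacheLoader(segments)

	// Load replays the segments in order; a corrupt segment is truncated at
	// the last valid byte and loading continues with the next one.
	if err := loader.Load(cache); err != nil {
		log.Fatal(err)
	}
	log.Printf("cache size after replay: %d bytes", cache.Size())
}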

type CompactionGroup

type CompactionGroup []string

type CompactionPlanner

type CompactionPlanner interface {
	Plan(lastWrite time.Time) []CompactionGroup
	PlanLevel(level int) []CompactionGroup
}

CompactionPlanner determines what TSM files and WAL segments to include in a given compaction run.

type Compactor

type Compactor struct {
	Dir    string
	Cancel chan struct{}
	Size   int

	FileStore interface {
		NextGeneration() int
	}
}

Compactor merges multiple TSM files into new files or writes a Cache into 1 or more TSM files

func (*Compactor) Clone

func (c *Compactor) Clone() *Compactor

Clone will return a new compactor that can be used even if the engine is closed

func (*Compactor) CompactFast

func (c *Compactor) CompactFast(tsmFiles []string) ([]string, error)

CompactFast will write multiple smaller TSM files into one or more larger files.

func (*Compactor) CompactFull

func (c *Compactor) CompactFull(tsmFiles []string) ([]string, error)

CompactFull will write multiple smaller TSM files into one or more larger files.

func (*Compactor) WriteSnapshot

func (c *Compactor) WriteSnapshot(cache *Cache) ([]string, error)

WriteSnapshot will write a Cache snapshot to one or more new TSM files.

type DefaultPlanner

type DefaultPlanner struct {
	FileStore interface {
		Stats() []FileStat
		LastModified() time.Time
		BlockCount(path string, idx int) int
	}

	// CompactFullWriteColdDuration specifies the length of time after
	// which if no writes have been committed to the WAL, the engine will
	// do a full compaction of the TSM files in this shard. This duration
	// should always be greater than the CacheFlushWriteColdDuration
	CompactFullWriteColdDuration time.Duration
	// contains filtered or unexported fields
}

DefaultPlanner implements CompactionPlanner using a strategy to roll up multiple generations of TSM files into larger files in stages. It attempts to minimize the number of TSM files on disk while rolling up a bounded number of files.

func (*DefaultPlanner) Plan

func (c *DefaultPlanner) Plan(lastWrite time.Time) []CompactionGroup

Plan returns a set of TSM files to rewrite for level 4 or higher. The planning returns multiple groups if possible to allow compactions to run concurrently.

func (*DefaultPlanner) PlanLevel

func (c *DefaultPlanner) PlanLevel(level int) []CompactionGroup

PlanLevel returns a set of TSM files to rewrite for a specific level

type DeleteWALEntry

type DeleteWALEntry struct {
	Keys []string
}

DeleteWALEntry represents the deletion of multiple series.

func (*DeleteWALEntry) Encode

func (w *DeleteWALEntry) Encode(dst []byte) ([]byte, error)

func (*DeleteWALEntry) MarshalBinary

func (w *DeleteWALEntry) MarshalBinary() ([]byte, error)

func (*DeleteWALEntry) Type

func (w *DeleteWALEntry) Type() WalEntryType

func (*DeleteWALEntry) UnmarshalBinary

func (w *DeleteWALEntry) UnmarshalBinary(b []byte) error

type EmptyValue

type EmptyValue struct {
}

func (*EmptyValue) Size

func (e *EmptyValue) Size() int

func (*EmptyValue) String

func (e *EmptyValue) String() string

func (*EmptyValue) Time

func (e *EmptyValue) Time() time.Time

func (*EmptyValue) UnixNano

func (e *EmptyValue) UnixNano() int64

func (*EmptyValue) Value

func (e *EmptyValue) Value() interface{}

type Engine

type Engine struct {
	WAL            *WAL
	Cache          *Cache
	Compactor      *Compactor
	CompactionPlan CompactionPlanner
	FileStore      *FileStore

	MaxPointsPerBlock int

	// CacheFlushMemorySizeThreshold specifies the minimum size threshold for
	// the cache when the engine should write a snapshot to a TSM file
	CacheFlushMemorySizeThreshold uint64

	// CacheFlushWriteColdDuration specifies the length of time after which if
	// no writes have been committed to the WAL, the engine will write
	// a snapshot of the cache to a TSM file
	CacheFlushWriteColdDuration time.Duration
	// contains filtered or unexported fields
}

Engine represents a storage engine with compressed blocks.

func (*Engine) Backup

func (e *Engine) Backup(w io.Writer, basePath string, since time.Time) error

Backup will write a tar archive of any TSM files modified since the passed in time to the passed in writer. The basePath will be prepended to the names of the files in the archive. It will force a snapshot of the WAL first then perform the backup with a read lock against the file store. This means that new TSM files will not be able to be created in this shard while the backup is running. For shards that are still actively getting writes, this could cause the WAL to back up, increasing memory usage and eventually rejecting writes.

func (*Engine) Begin

func (e *Engine) Begin(writable bool) (tsdb.Tx, error)

Begin starts a new transaction on the engine.

func (*Engine) Close

func (e *Engine) Close() error

Close closes the engine.

func (*Engine) DeleteMeasurement

func (e *Engine) DeleteMeasurement(name string, seriesKeys []string) error

DeleteMeasurement deletes a measurement and all related series.

func (*Engine) DeleteSeries

func (e *Engine) DeleteSeries(seriesKeys []string) error

DeleteSeries deletes the series from the engine.

func (*Engine) Format

func (e *Engine) Format() tsdb.EngineFormat

Format returns the format type of this engine

func (*Engine) KeyCursor

func (e *Engine) KeyCursor(key string) *KeyCursor

func (*Engine) LoadMetadataIndex

func (e *Engine) LoadMetadataIndex(_ *tsdb.Shard, index *tsdb.DatabaseIndex, measurementFields map[string]*tsdb.MeasurementFields) error

LoadMetadataIndex loads the shard metadata into memory.

func (*Engine) Open

func (e *Engine) Open() error

Open opens and initializes the engine.

func (*Engine) Path

func (e *Engine) Path() string

Path returns the path the engine was opened with.

func (*Engine) PerformMaintenance

func (e *Engine) PerformMaintenance()

PerformMaintenance is for periodic maintenance of the store. It is a no-op for this engine.

func (*Engine) SeriesCount

func (e *Engine) SeriesCount() (n int, err error)

SeriesCount returns the number of series buckets on the shard.

func (*Engine) SetLogOutput

func (e *Engine) SetLogOutput(w io.Writer)

SetLogOutput is a no-op.

func (*Engine) ShouldCompactCache

func (e *Engine) ShouldCompactCache(lastWriteTime time.Time) bool

ShouldCompactCache returns true if the Cache is over its flush threshold or if the passed-in lastWriteTime is older than the write cold threshold.

func (*Engine) WritePoints

func (e *Engine) WritePoints(points []models.Point, measurementFieldsToSave map[string]*tsdb.MeasurementFields, seriesToCreate []*tsdb.SeriesCreate) error

WritePoints writes metadata and point data into the engine. Returns an error if new points are added to an existing key.

func (*Engine) WriteSnapshot

func (e *Engine) WriteSnapshot() error

WriteSnapshot will snapshot the cache and write a new TSM file with its contents, releasing the snapshot when done.

func (*Engine) WriteTo

func (e *Engine) WriteTo(w io.Writer) (n int64, err error)

type FileStat

type FileStat struct {
	Path             string
	HasTombstone     bool
	Size             uint32
	LastModified     time.Time
	MinTime, MaxTime time.Time
	MinKey, MaxKey   string
}

func (FileStat) ContainsKey

func (f FileStat) ContainsKey(key string) bool

func (FileStat) OverlapsKeyRange

func (f FileStat) OverlapsKeyRange(min, max string) bool

func (FileStat) OverlapsTimeRange

func (f FileStat) OverlapsTimeRange(min, max time.Time) bool

type FileStore

type FileStore struct {
	Logger *log.Logger
	// contains filtered or unexported fields
}

func NewFileStore

func NewFileStore(dir string) *FileStore

func (*FileStore) Add

func (f *FileStore) Add(files ...TSMFile)

func (*FileStore) BlockCount

func (f *FileStore) BlockCount(path string, idx int) int

BlockCount returns number of values stored in the block at location idx in the file at path. If path does not match any file in the store, 0 is returned. If idx is out of range for the number of blocks in the file, 0 is returned.

func (*FileStore) Close

func (f *FileStore) Close() error

func (*FileStore) Count

func (f *FileStore) Count() int

Count returns the number of TSM files currently loaded.

func (*FileStore) CurrentGeneration

func (f *FileStore) CurrentGeneration() int

CurrentGeneration returns the current generation of the TSM files

func (*FileStore) Delete

func (f *FileStore) Delete(keys []string) error

func (*FileStore) KeyCursor

func (f *FileStore) KeyCursor(key string) *KeyCursor

func (*FileStore) Keys

func (f *FileStore) Keys() []string

func (*FileStore) LastModified

func (f *FileStore) LastModified() time.Time

LastModified returns the last time the file store was updated with new TSM files or a delete

func (*FileStore) NextGeneration

func (f *FileStore) NextGeneration() int

NextGeneration returns the max file ID + 1

func (*FileStore) Open

func (f *FileStore) Open() error

func (*FileStore) Read

func (f *FileStore) Read(key string, t time.Time) ([]Value, error)

func (*FileStore) Remove

func (f *FileStore) Remove(paths ...string)

Remove removes the files with matching paths from the set of active files. It does not remove the paths from disk.

func (*FileStore) Replace

func (f *FileStore) Replace(oldFiles, newFiles []string) error

func (*FileStore) Stats

func (f *FileStore) Stats() []FileStat

func (*FileStore) Type

func (f *FileStore) Type(key string) (byte, error)

type FloatDecoder

type FloatDecoder struct {
	// contains filtered or unexported fields
}

FloatDecoder decodes a byte slice into multiple float64 values.

func NewFloatDecoder

func NewFloatDecoder(b []byte) (*FloatDecoder, error)

func (*FloatDecoder) Error

func (it *FloatDecoder) Error() error

func (*FloatDecoder) Next

func (it *FloatDecoder) Next() bool

func (*FloatDecoder) Values

func (it *FloatDecoder) Values() float64

type FloatEncoder

type FloatEncoder struct {
	// contains filtered or unexported fields
}

FloatEncoder encodes multiple float64s into a byte slice

func NewFloatEncoder

func NewFloatEncoder() *FloatEncoder

func (*FloatEncoder) Bytes

func (s *FloatEncoder) Bytes() ([]byte, error)

func (*FloatEncoder) Finish

func (s *FloatEncoder) Finish()

func (*FloatEncoder) Push

func (s *FloatEncoder) Push(v float64)
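
A round-trip sketch for the float codec; Finish is assumed to terminate the compressed stream before Bytes is read (the import path is an assumption for this release):

package main

import (
	"fmt"
	"log"

	"github.com/influxdb/influxdb/tsdb/engine/tsm1"
)

func main() {
	enc := tsm1.NewFloatEncoder()
	for _, v := range []float64{1.1, 2.2, 3.3} {
		enc.Push(v)
	}
	enc.Finish() // assumed to write the end-of-stream marker

	buf, err := enc.Bytes()
	if err != nil {
		log.Fatal(err)
	}

	dec, err := tsm1.NewFloatDecoder(buf)
	if err != nil {
		log.Fatal(err)
	}
	for dec.Next() {
		fmt.Println(dec.Values()) // current decoded float64
	}
	if err := dec.Error(); err != nil {
		log.Fatal(err)
	}
}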

type FloatValue

type FloatValue struct {
	// contains filtered or unexported fields
}

func DecodeFloatBlock

func DecodeFloatBlock(block []byte, a []FloatValue) ([]FloatValue, error)

func (*FloatValue) Size

func (f *FloatValue) Size() int

func (*FloatValue) String

func (f *FloatValue) String() string

func (*FloatValue) Time

func (f *FloatValue) Time() time.Time

func (*FloatValue) UnixNano

func (f *FloatValue) UnixNano() int64

func (*FloatValue) Value

func (f *FloatValue) Value() interface{}

type IndexEntry

type IndexEntry struct {

	// The min and max time of all points stored in the block.
	MinTime, MaxTime time.Time

	// The absolute position in the file where this block is located.
	Offset int64

	// The size in bytes of the block in the file.
	Size uint32
}

IndexEntry is the index information for a given block in a TSM file.

func (*IndexEntry) Contains

func (e *IndexEntry) Contains(t time.Time) bool

Contains returns true if this IndexEntry may contain values for the given time. The min and max times are inclusive.

func (*IndexEntry) OverlapsTimeRange

func (e *IndexEntry) OverlapsTimeRange(min, max time.Time) bool

func (*IndexEntry) String

func (e *IndexEntry) String() string

func (*IndexEntry) UnmarshalBinary

func (e *IndexEntry) UnmarshalBinary(b []byte) error

type Int64Decoder

type Int64Decoder interface {
	Next() bool
	Read() int64
	Error() error
}

Int64Decoder decodes a byte slice into int64s

func NewInt64Decoder

func NewInt64Decoder(b []byte) Int64Decoder

type Int64Encoder

type Int64Encoder interface {
	Write(v int64)
	Bytes() ([]byte, error)
}

Int64Encoder encodes int64 values into byte slices.

func NewInt64Encoder

func NewInt64Encoder() Int64Encoder

type Int64Value

type Int64Value struct {
	// contains filtered or unexported fields
}

func DecodeInt64Block

func DecodeInt64Block(block []byte, a []Int64Value) ([]Int64Value, error)

func (*Int64Value) Size

func (v *Int64Value) Size() int

func (*Int64Value) String

func (f *Int64Value) String() string

func (*Int64Value) Time

func (v *Int64Value) Time() time.Time

func (*Int64Value) UnixNano

func (v *Int64Value) UnixNano() int64

func (*Int64Value) Value

func (v *Int64Value) Value() interface{}

type KeyCursor

type KeyCursor struct {
	// contains filtered or unexported fields
}

func (*KeyCursor) Close

func (c *KeyCursor) Close()

func (*KeyCursor) Next

func (c *KeyCursor) Next(ascending bool) ([]Value, error)

func (*KeyCursor) SeekTo

func (c *KeyCursor) SeekTo(t time.Time, ascending bool) ([]Value, error)

type KeyIterator

type KeyIterator interface {
	Next() bool
	Read() (string, time.Time, time.Time, []byte, error)
	Close() error
}

KeyIterator allows iteration over set of keys and values in sorted order.

func NewCacheKeyIterator

func NewCacheKeyIterator(cache *Cache, size int) KeyIterator

func NewTSMKeyIterator

func NewTSMKeyIterator(size int, fast bool, readers ...*TSMReader) (KeyIterator, error)

type SegmentInfo

type SegmentInfo struct {
	// contains filtered or unexported fields
}

SegmentInfo represents metadata about a segment.

type StringDecoder

type StringDecoder interface {
	Next() bool
	Read() string
	Error() error
}

func NewStringDecoder

func NewStringDecoder(b []byte) (StringDecoder, error)

type StringEncoder

type StringEncoder interface {
	Write(s string)
	Bytes() ([]byte, error)
}

func NewStringEncoder

func NewStringEncoder() StringEncoder

type StringValue

type StringValue struct {
	// contains filtered or unexported fields
}

func DecodeStringBlock

func DecodeStringBlock(block []byte, a []StringValue) ([]StringValue, error)

func (*StringValue) Size

func (v *StringValue) Size() int

func (*StringValue) String

func (f *StringValue) String() string

func (*StringValue) Time

func (v *StringValue) Time() time.Time

func (*StringValue) UnixNano

func (v *StringValue) UnixNano() int64

func (*StringValue) Value

func (v *StringValue) Value() interface{}

type TSMFile

type TSMFile interface {
	// Path returns the underlying file path for the TSMFile.  If the file
	// has not been written or loaded from disk, the zero value is returned.
	Path() string

	// Read returns all the values in the block where time t resides
	Read(key string, t time.Time) ([]Value, error)

	// ReadAt returns all the values in the block identified by entry.
	ReadAt(entry *IndexEntry, values []Value) ([]Value, error)

	// Entries returns the index entries for all blocks for the given key.
	Entries(key string) []*IndexEntry

	// ContainsValue returns true if the TSMFile may contain a value with the
	// specified key and time.
	ContainsValue(key string, t time.Time) bool

	// Contains returns true if the file contains any values for the given
	// key.
	Contains(key string) bool

	// TimeRange returns the min and max time across all keys in the file.
	TimeRange() (time.Time, time.Time)

	// KeyRange returns the min and max keys in the file.
	KeyRange() (string, string)

	// Keys returns all keys contained in the file.
	Keys() []string

	// Type returns the block type of the values stored for the key.  Returns one of
	// BlockFloat64, BlockInt64, BlockBool, BlockString.  If key does not exist,
	// an error is returned.
	Type(key string) (byte, error)

	// Delete removes the keys from the set of keys available in this file.
	Delete(keys []string) error

	// HasTombstones returns true if file contains values that have been deleted.
	HasTombstones() bool

	// TombstoneFiles returns the tombstone filestats if there are any tombstones
	// written for this file.
	TombstoneFiles() []FileStat

	// Close the underlying file resources
	Close() error

	// Size returns the size of the file on disk in bytes.
	Size() uint32

	// Remove deletes the file from the filesystem
	Remove() error

	// Stats returns summary information about the TSM file.
	Stats() FileStat

	// BlockIterator returns an iterator pointing to the first block in the file and
	// allows sequential iteration over every block.
	BlockIterator() *BlockIterator
}

type TSMIndex

type TSMIndex interface {

	// Add records a new block entry for a key in the index.
	Add(key string, blockType byte, minTime, maxTime time.Time, offset int64, size uint32)

	// Delete removes the given keys from the index.
	Delete(keys []string)

	// Contains returns true if the given key exists in the index.
	Contains(key string) bool

	// ContainsValue returns true if key and time might exist in this file.  This function could
	// return true even though the actual point does not exist.  For example, the key may
	// exist in this file, but not have a point exactly at time t.
	ContainsValue(key string, timestamp time.Time) bool

	// Entries returns all index entries for a key.
	Entries(key string) []*IndexEntry

	// Entry returns the index entry for the specified key and timestamp.  If no entry
	// matches the key and timestamp, nil is returned.
	Entry(key string, timestamp time.Time) *IndexEntry

	// Keys returns the unique set of keys in the index.
	Keys() []string

	// Key returns the key and its index entries at the given position.
	Key(index int) (string, []*IndexEntry)

	// KeyAt returns the key in the index at the given position.
	KeyAt(index int) string

	// KeyCount returns the count of unique keys in the index.
	KeyCount() int

	// Size returns the size of the current index in bytes
	Size() uint32

	// TimeRange returns the min and max time across all keys in the file.
	TimeRange() (time.Time, time.Time)

	// KeyRange returns the min and max keys in the file.
	KeyRange() (string, string)

	// Type returns the block type of the values stored for the key.  Returns one of
	// BlockFloat64, BlockInt64, BlockBool, BlockString.  If key does not exist,
	// an error is returned.
	Type(key string) (byte, error)

	// MarshalBinary returns a byte slice encoded version of the index.
	MarshalBinary() ([]byte, error)

	// UnmarshalBinary populates an index from an encoded byte slice
	// representation of an index.
	UnmarshalBinary(b []byte) error

	// Write writes the index contents to a writer
	Write(w io.Writer) error
}

TSMIndex represents the index section of a TSM file. The index records all blocks, their locations, sizes, and min and max times.

func NewDirectIndex

func NewDirectIndex() TSMIndex

func NewIndirectIndex

func NewIndirectIndex() TSMIndex
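
A sketch of building an in-memory index directly and serializing it; the key and block metadata below are made up for illustration:

package main

import (
	"log"
	"time"

	"github.com/influxdb/influxdb/tsdb/engine/tsm1"
)

func main() {
	idx := tsm1.NewDirectIndex()

	// Record one float64 block for a key: its time range, file offset and size.
	key := tsm1.SeriesFieldKey("cpu,host=server01", "value")
	idx.Add(key, tsm1.BlockFloat64, time.Unix(0, 1), time.Unix(0, 100), 5, 128)

	b, err := idx.MarshalBinary()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("index is %d bytes for %d key(s)", len(b), idx.KeyCount())
}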

type TSMReader

type TSMReader struct {
	// contains filtered or unexported fields
}

func NewTSMReader

func NewTSMReader(r io.ReadSeeker) (*TSMReader, error)

func NewTSMReaderWithOptions

func NewTSMReaderWithOptions(opt TSMReaderOptions) (*TSMReader, error)

func (*TSMReader) BlockIterator

func (t *TSMReader) BlockIterator() *BlockIterator

func (*TSMReader) Close

func (t *TSMReader) Close() error

func (*TSMReader) Contains

func (t *TSMReader) Contains(key string) bool

func (*TSMReader) ContainsValue

func (t *TSMReader) ContainsValue(key string, ts time.Time) bool

ContainsValue returns true if key and time might exist in this file. This function could return true even though the actual point does not exist. For example, the key may exist in this file, but not have a point exactly at time t.

func (*TSMReader) Delete

func (t *TSMReader) Delete(keys []string) error

func (*TSMReader) Entries

func (t *TSMReader) Entries(key string) []*IndexEntry

func (*TSMReader) HasTombstones

func (t *TSMReader) HasTombstones() bool

HasTombstones returns true if there are any tombstone entries recorded.

func (*TSMReader) IndexSize

func (t *TSMReader) IndexSize() uint32

func (*TSMReader) Key

func (t *TSMReader) Key(index int) (string, []*IndexEntry)

func (*TSMReader) KeyAt

func (t *TSMReader) KeyAt(idx int) string

func (*TSMReader) KeyCount

func (t *TSMReader) KeyCount() int

func (*TSMReader) KeyRange

func (t *TSMReader) KeyRange() (string, string)

KeyRange returns the min and max key across all keys in the file.

func (*TSMReader) Keys

func (t *TSMReader) Keys() []string

func (*TSMReader) LastModified

func (t *TSMReader) LastModified() time.Time

func (*TSMReader) Path

func (t *TSMReader) Path() string

func (*TSMReader) Read

func (t *TSMReader) Read(key string, timestamp time.Time) ([]Value, error)

func (*TSMReader) ReadAll

func (t *TSMReader) ReadAll(key string) ([]Value, error)

ReadAll returns all values for a key in all blocks.
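
A sketch of opening an existing TSM file and dumping all of its values; the file name here is hypothetical:

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/influxdb/influxdb/tsdb/engine/tsm1"
)

func main() {
	f, err := os.Open("000000001-000000001.tsm") // hypothetical generation-sequence name
	if err != nil {
		log.Fatal(err)
	}

	r, err := tsm1.NewTSMReader(f)
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()

	for _, key := range r.Keys() {
		vals, err := r.ReadAll(key)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s: %d values\n", key, len(vals))
	}
}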

func (*TSMReader) ReadAt

func (t *TSMReader) ReadAt(entry *IndexEntry, vals []Value) ([]Value, error)

func (*TSMReader) Remove

func (t *TSMReader) Remove() error

Remove removes any underlying files stored on disk for this reader.

func (*TSMReader) Size

func (t *TSMReader) Size() uint32

func (*TSMReader) Stats

func (t *TSMReader) Stats() FileStat

func (*TSMReader) TimeRange

func (t *TSMReader) TimeRange() (time.Time, time.Time)

TimeRange returns the min and max time across all keys in the file.

func (*TSMReader) TombstoneFiles

func (t *TSMReader) TombstoneFiles() []FileStat

TombstoneFiles returns any tombstone files associated with this TSM file.

func (*TSMReader) Type

func (t *TSMReader) Type(key string) (byte, error)

type TSMReaderOptions

type TSMReaderOptions struct {
	// Reader is used to create a file IO based reader.
	Reader io.ReadSeeker

	// MMAPFile is used to create an MMAP based reader.
	MMAPFile *os.File
}

type TSMWriter

type TSMWriter interface {
	// Write writes a new block for key containing the values.  Writes append
	// blocks in the order that the Write function is called.  The caller is
	// responsible for ensuring keys and blocks are sorted appropriately.
	// Values are encoded as a full block.  The caller is responsible for
	// ensuring a fixed number of values are encoded in each block as well as
	// ensuring the Values are sorted. The first and last timestamp values are
	// used as the minimum and maximum values for the index entry.
	Write(key string, values Values) error

	// WriteBlock writes a new block for key containing the bytes in block.  WriteBlock appends
	// blocks in the order that the WriteBlock function is called.  The caller is
	// responsible for ensuring keys and blocks are sorted appropriately, and that the
	// block and index information is correct for the block.  The minTime and maxTime
	// timestamp values are used as the minimum and maximum values for the index entry.
	WriteBlock(key string, minTime, maxTime time.Time, block []byte) error

	// WriteIndex finishes the TSM write streams and writes the index.
	WriteIndex() error

	// Close closes any underlying file resources.
	Close() error

	// Size returns the current size in bytes of the file
	Size() uint32
}

TSMWriter writes TSM formatted key and values.

func NewTSMWriter

func NewTSMWriter(w io.Writer) (TSMWriter, error)
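
A sketch of writing a single sorted block of values to a new TSM file. Per the Write contract above, the caller keeps keys and values sorted; WriteIndex then finishes the file. The file name is hypothetical:

package main

import (
	"log"
	"os"
	"time"

	"github.com/influxdb/influxdb/tsdb/engine/tsm1"
)

func main() {
	f, err := os.Create("000000001-000000001.tsm") // hypothetical file name
	if err != nil {
		log.Fatal(err)
	}

	w, err := tsm1.NewTSMWriter(f)
	if err != nil {
		log.Fatal(err)
	}

	key := tsm1.SeriesFieldKey("cpu,host=server01", "value")
	vals := tsm1.Values{
		tsm1.NewValue(time.Unix(0, 1), 1.5),
		tsm1.NewValue(time.Unix(0, 2), 2.5), // already sorted by time
	}

	if err := w.Write(key, vals); err != nil {
		log.Fatal(err)
	}
	if err := w.WriteIndex(); err != nil { // finish the data section and write the index
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
}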

type TimeDecoder

type TimeDecoder interface {
	Next() bool
	Read() time.Time
	Error() error
}

TimeDecoder decodes byte slices to time.Time values.

func NewTimeDecoder

func NewTimeDecoder(b []byte) TimeDecoder

type TimeEncoder

type TimeEncoder interface {
	Write(t time.Time)
	Bytes() ([]byte, error)
}

TimeEncoder encodes time.Time to byte slices.

func NewTimeEncoder

func NewTimeEncoder() TimeEncoder

NewTimeEncoder returns a TimeEncoder

type Tombstoner

type Tombstoner struct {

	// Path is the location of the file to record tombstones. This should be the
	// full path to a TSM file.
	Path string
	// contains filtered or unexported fields
}

func (*Tombstoner) Add

func (t *Tombstoner) Add(keys []string) error

func (*Tombstoner) Delete

func (t *Tombstoner) Delete() error

func (*Tombstoner) HasTombstones

func (t *Tombstoner) HasTombstones() bool

HasTombstones returns true if there are any tombstone entries recorded.

func (*Tombstoner) ReadAll

func (t *Tombstoner) ReadAll() ([]string, error)

func (*Tombstoner) TombstoneFiles

func (t *Tombstoner) TombstoneFiles() []FileStat

TombstoneFiles returns any tombstone files associated with this TSM file.

type Value

type Value interface {
	Time() time.Time
	UnixNano() int64
	Value() interface{}
	Size() int
	String() string
}

func DecodeBlock

func DecodeBlock(block []byte, vals []Value) ([]Value, error)

DecodeBlock takes a byte slice and decodes it into values of the appropriate type based on the block.

func NewValue

func NewValue(t time.Time, value interface{}) Value

type Values

type Values []Value

Values represents a collection of Value types sorted in ascending time order. The underlying type should be the same across all values, but the interface makes the code cleaner.

func (Values) Deduplicate

func (a Values) Deduplicate() Values

Deduplicate returns a new Values slice with any values that have the same timestamp removed. The Value that appears last in the slice is the one that is kept.

func (Values) Encode

func (a Values) Encode(buf []byte) ([]byte, error)

Encode converts the values to a byte slice. If there are no values, this function panics.
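
A sketch combining Deduplicate, Encode and DecodeBlock for a float block. Passing a nil buffer to Encode is assumed to let the package allocate as needed:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/influxdb/influxdb/tsdb/engine/tsm1"
)

func main() {
	vals := tsm1.Values{
		tsm1.NewValue(time.Unix(0, 1), 1.0),
		tsm1.NewValue(time.Unix(0, 2), 2.0),
		tsm1.NewValue(time.Unix(0, 2), 3.0), // duplicate timestamp; the last one is kept
	}

	block, err := vals.Deduplicate().Encode(nil) // nil buf: assumed to be grown as needed
	if err != nil {
		log.Fatal(err)
	}

	decoded, err := tsm1.DecodeBlock(block, nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, v := range decoded {
		fmt.Println(v.UnixNano(), v.Value())
	}
}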

func (Values) InfluxQLType

func (a Values) InfluxQLType() (influxql.DataType, error)

InfluxQLType returns the influxql.DataType the values map to.

func (Values) Len

func (a Values) Len() int

Sort methods

func (Values) Less

func (a Values) Less(i, j int) bool

func (Values) MaxTime

func (a Values) MaxTime() int64

func (Values) MinTime

func (a Values) MinTime() int64

func (Values) Size

func (a Values) Size() int

func (Values) Swap

func (a Values) Swap(i, j int)

type WAL

type WAL struct {

	// LogOutput is the writer used by the logger.
	LogOutput io.Writer

	// SegmentSize is the file size at which a segment file will be rotated
	SegmentSize int

	// LoggingEnabled specifies if detailed logs should be output
	LoggingEnabled bool
	// contains filtered or unexported fields
}

func NewWAL

func NewWAL(path string) *WAL

func (*WAL) Close

func (l *WAL) Close() error

Close will finish any flush that is currently in progress and close the file handles.

func (*WAL) CloseSegment

func (l *WAL) CloseSegment() error

CloseSegment closes the current segment if it is non-empty and opens a new one.

func (*WAL) ClosedSegments

func (l *WAL) ClosedSegments() ([]string, error)

func (*WAL) Delete

func (l *WAL) Delete(keys []string) (int, error)

Delete deletes the given keys, returning the segment ID for the operation.

func (*WAL) LastWriteTime

func (l *WAL) LastWriteTime() time.Time

LastWriteTime returns the last time anything was written to the WAL.

func (*WAL) Open

func (l *WAL) Open() error

Open opens and initializes the Log. It will recover from previous unclean shutdowns.

func (*WAL) Path

func (l *WAL) Path() string

Path returns the path the log was initialized with.

func (*WAL) Remove

func (l *WAL) Remove(files []string) error

func (*WAL) WritePoints

func (l *WAL) WritePoints(values map[string][]Value) (int, error)

WritePoints writes the given points to the WAL. Returns the WAL segment ID to which the points were written. If an error is returned the segment ID should be ignored.
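
A sketch of writing points directly to a WAL; the WAL directory and field key are hypothetical:

package main

import (
	"log"
	"time"

	"github.com/influxdb/influxdb/tsdb/engine/tsm1"
)

func main() {
	w := tsm1.NewWAL("/var/lib/influxdb/wal/db/rp/1") // hypothetical path
	if err := w.Open(); err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	key := tsm1.SeriesFieldKey("cpu,host=server01", "value")
	segID, err := w.WritePoints(map[string][]tsm1.Value{
		key: {tsm1.NewValue(time.Now(), 42.0)},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("wrote to segment %d", segID)
}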

type WALEntry

type WALEntry interface {
	Type() WalEntryType
	Encode(dst []byte) ([]byte, error)
	MarshalBinary() ([]byte, error)
	UnmarshalBinary(b []byte) error
}

WALEntry is a record stored in each WAL segment. Each entry has a type and an opaque, type-dependent byte slice data attribute.

type WALSegmentReader

type WALSegmentReader struct {
	// contains filtered or unexported fields
}

WALSegmentReader reads WAL segments.

func NewWALSegmentReader

func NewWALSegmentReader(r io.ReadCloser) *WALSegmentReader

func (*WALSegmentReader) Close

func (r *WALSegmentReader) Close() error

func (*WALSegmentReader) Count

func (r *WALSegmentReader) Count() int64

Count returns the total number of bytes read successfully from the segment, as of the last call to Read(). The segment is guaranteed to be valid up to and including this number of bytes.

func (*WALSegmentReader) Error

func (r *WALSegmentReader) Error() error

func (*WALSegmentReader) Next

func (r *WALSegmentReader) Next() bool

Next indicates if there is a value to read

func (*WALSegmentReader) Read

func (r *WALSegmentReader) Read() (WALEntry, error)
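
A sketch of iterating the entries in a single WAL segment; if Read fails partway through, Count reports how many bytes of the file are still valid. The segment file name is hypothetical:

package main

import (
	"log"
	"os"

	"github.com/influxdb/influxdb/tsdb/engine/tsm1"
)

func main() {
	f, err := os.Open("_00001.wal") // hypothetical segment name
	if err != nil {
		log.Fatal(err)
	}

	r := tsm1.NewWALSegmentReader(f)
	defer r.Close()

	for r.Next() {
		entry, err := r.Read()
		if err != nil {
			// Corruption: the segment is valid up to r.Count() bytes.
			log.Printf("stopping at %d valid bytes: %v", r.Count(), err)
			break
		}
		switch e := entry.(type) {
		case *tsm1.WriteWALEntry:
			log.Printf("write entry: %d keys", len(e.Values))
		case *tsm1.DeleteWALEntry:
			log.Printf("delete entry: %v", e.Keys)
		}
	}
}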

type WALSegmentWriter

type WALSegmentWriter struct {
	// contains filtered or unexported fields
}

WALSegmentWriter writes WAL segments.

func NewWALSegmentWriter

func NewWALSegmentWriter(w io.WriteCloser) *WALSegmentWriter

func (*WALSegmentWriter) Write

func (w *WALSegmentWriter) Write(entryType WalEntryType, compressed []byte) error

type WalEntryType

type WalEntryType byte

WalEntryType is a byte written to a WAL segment file that indicates what the following compressed block contains.

const (
	WriteWALEntryType  WalEntryType = 0x01
	DeleteWALEntryType WalEntryType = 0x02
)

type WriteWALEntry

type WriteWALEntry struct {
	Values map[string][]Value
}

WriteWALEntry represents a write of points.

func (*WriteWALEntry) Encode

func (w *WriteWALEntry) Encode(dst []byte) ([]byte, error)

Encode converts the WriteWALEntry into a byte stream using dst if it is large enough. If dst is too small, the slice will be grown to fit the encoded entry.

func (*WriteWALEntry) MarshalBinary

func (w *WriteWALEntry) MarshalBinary() ([]byte, error)

func (*WriteWALEntry) Type

func (w *WriteWALEntry) Type() WalEntryType

func (*WriteWALEntry) UnmarshalBinary

func (w *WriteWALEntry) UnmarshalBinary(b []byte) error
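
A sketch of a marshal/unmarshal round trip for a write entry; the field key is hypothetical, and the destination map is pre-initialized as a precaution before unmarshalling:

package main

import (
	"log"
	"time"

	"github.com/influxdb/influxdb/tsdb/engine/tsm1"
)

func main() {
	in := &tsm1.WriteWALEntry{
		Values: map[string][]tsm1.Value{
			tsm1.SeriesFieldKey("cpu,host=server01", "value"): {
				tsm1.NewValue(time.Unix(0, 1), 1.5),
			},
		},
	}

	b, err := in.MarshalBinary()
	if err != nil {
		log.Fatal(err)
	}

	out := tsm1.WriteWALEntry{Values: map[string][]tsm1.Value{}}
	if err := out.UnmarshalBinary(b); err != nil {
		log.Fatal(err)
	}
	log.Printf("round-tripped %d key(s), entry type %v", len(out.Values), out.Type())
}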
