immutable

package
v1.4.0 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Apr 12, 2025 License: Apache-2.0 Imports: 76 Imported by: 0

Documentation

Overview

Copyright 2023 Huawei Cloud Computing Technologies Co., Ltd.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Copyright 2023 Huawei Cloud Computing Technologies Co., Ltd.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Copyright 2023 Huawei Cloud Computing Technologies Co., Ltd.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Copyright 2023 Huawei Cloud Computing Technologies Co., Ltd.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Copyright 2023 Huawei Cloud Computing Technologies Co., Ltd.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Copyright 2024 Huawei Cloud Computing Technologies Co., Ltd.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Copyright 2024 Huawei Cloud Computing Technologies Co., Ltd.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Index

Constants

View Source
const (
	ChunkMetaSize int64 = 128 * 1024 * 1024
	ChunkMetaTTL        = 60 * time.Minute
)
View Source
const (
	QueryMetaCacheTTL            = 10 * time.Minute
	QueryMetaDataCacheSize int64 = 50 * 1024 * 1024 * int64(MetaIndexLen+128*int(unsafe.Sizeof(fragment.FragmentRange{})))
)
View Source
const (
	MetaIndexLimitNum         = 16
	MetaIndexHeaderSize int64 = 16
	MetaIndexItemSize         = int64(util.Int64SizeBytes*3 + util.Uint64SizeBytes + util.Uint32SizeBytes)
)
View Source
const (
	PKMetaLimitNum   = 16
	PKMetaPrefixSize = util.Uint64SizeBytes*2 + util.Uint32SizeBytes*2
)
View Source
const (
	PKMetaInfoLength int64 = 12
	PkMetaHeaderSize       = int64(util.Uint32SizeBytes * 2)
)
View Source
const (
	PRELOAD = iota
	LOAD
)
View Source
const (
	SwapperCompressNone   = 0
	SwapperCompressSnappy = 1
	SwapperCompressZSTD   = 2
)
View Source
const (
	META_DATA_N_BYTES  int32 = 8 + 8 + 8 + 8 + 8 + 4 + 4
	META_STORE_N_BYTES int32 = META_DATA_N_BYTES + 4
	META_DATA_SIZE     int32 = META_STORE_N_BYTES + 4
)
View Source
const (
	MinMaxTimeLen        = int(unsafe.Sizeof(SegmentRange{}))
	MetaIndexLen         = int(unsafe.Sizeof(MetaIndex{}))
	DetachedMetaIndexLen = int(unsafe.Sizeof(MetaIndex{}) - 4) //count not use
)
View Source
const (
	InitParamKeyDst         string = "dst"
	InitParamKeyKeys        string = "keys"
	InitParamKeyMeasurement string = "measurement"
)
View Source
const (
	BLOOMFILTER_SIZE         = 8
	SERIESKEY_STATISTIC_SIZE = 24
	COMPRESSION_RATIO        = 2
)
View Source
const (
	ChunkMetaReadNum     = 16
	BatchReaderRecordNum = 8

	ReaderContentNumSpan  = "reader_content_num_span"
	ReaderContentSizeSpan = "reader_content_size_span"
	ReaderContentDuration = "reader_content_duration"
	ReaderFilterDuration  = "reader_filter_duration"
)
View Source
const (
	ChunkMetaCompressNone   = 0
	ChunkMetaCompressSnappy = 1
	ChunkMetaCompressLZ4    = 2
	ChunkMetaCompressSelf   = 3
	ChunkMetaCompressEnd    = 4
)
View Source
const (
	DownSampleLogDir = "downsample_log"
	ShardMoveLogDir  = "shard_move_log"

	TsspDirName        = "tssp"
	ColumnStoreDirName = obs.ColumnStoreDirName
	CountBinFile       = "count.txt"
	CapacityBinFile    = "capacity.txt"
)
View Source
const (
	DataFile        = "segment.bin"
	ChunkMetaFile   = "segment.meta"
	MetaIndexFile   = "segment.idx"
	PrimaryKeyFile  = "primary.idx"
	PrimaryMetaFile = "primary.meta"
)
View Source
const (
	CompactLevels = 7
)
View Source
const (
	DefaultLevelMergeFileNum = 4
)
View Source
const (
	FD_OUTSIDE uint32 = 0x00001
)
View Source
const MetaIndexConsumeNum = 16
View Source
const (
	MetaIndexSegmentNum = 16
)
View Source
const PKDataLimitNum = 16
View Source
const (
	SortLimitCursorDuration = "sort_limit_cursor_duration"
)

Variables

View Source
var (
	ErrCompStopped        = errors.New("compact stopped")
	ErrDownSampleStopped  = errors.New("downSample stopped")
	ErrDroppingMst        = errors.New("measurement is dropped")
	ErrParquetStopped     = errors.New("parquet task stopped")
	LevelCompactRule      = []uint16{0, 1, 0, 2, 0, 3, 0, 1, 2, 3, 0, 4, 0, 5, 0, 1, 2, 6}
	LevelCompactRuleForCs = []uint16{0, 1, 0, 1, 0, 1} // columnStore currently only doing level 0 and level 1 compaction,but the full functionality is available
	LeveLMinGroupFiles    = [CompactLevels]int{8, 4, 4, 4, 4, 4, 2}

	EnableMergeOutOfOrder = true
)
View Source
var (
	SegmentLen       = (Segment{}).bytes()
	ColumnMetaLenMin = (ColumnMeta{}).bytes(1)
	ChunkMetaMinLen  = (&ChunkMeta{}).minBytes()
)
View Source
var DDLRespDataFactory = make(map[hybridqp.DDLType]GenDDLRespDataFunc)

DDLRespDataFactory is a factory for creating RespData instances.

View Source
var DDLSequenceHandlerFactory = make(map[hybridqp.DDLType]GenSequenceHandlerFunc)

DDLSequenceHandlerFactory is a factory for creating SequenceHandler instances.

View Source
var ErrDirtyLog = errors.New("incomplete log file")
View Source
var (
	LevelMergeFileNum = []int{8, 8}
)

Functions

func AddTSSP2ParquetProcess added in v1.4.0

func AddTSSP2ParquetProcess(files ...string)

func AggregateData

func AggregateData(newRec, baseRec *record.Record, ops []*comm.CallOption) bool

func CanEncodeOneRowMode added in v1.2.0

func CanEncodeOneRowMode(col *record.ColVal) bool

func CleanTempFile added in v1.3.0

func CleanTempFile(f fileops.File)

func CompactRecovery

func CompactRecovery(path string, group *CompactGroup)

func CompareT added in v1.3.0

func CompareT[T int | int64 | float64 | string](s1, s2 T, isAscending bool) (bool, bool)

func CreateTSSPFileReader

func CreateTSSPFileReader(size int64, fd fileops.File, trailer *Trailer, tb *TableData, ver uint64, tmp bool, lockPath *string) (*tsspFileReader, error)

func DecodeAggTimes added in v1.4.0

func DecodeAggTimes(buf []byte) ([]byte, int64, int64, error)

func DecodeColumnHeader added in v1.2.0

func DecodeColumnHeader(col *record.ColVal, data []byte, colType uint8) ([]byte, []byte, error)

func DecodeColumnOfOneValue added in v1.2.0

func DecodeColumnOfOneValue(data []byte, col *record.ColVal, typ uint8)

func DelTSSP2ParquetProcess added in v1.4.0

func DelTSSP2ParquetProcess(files ...string)

func EncodeColumnHeader added in v1.2.0

func EncodeColumnHeader(col *record.ColVal, dst []byte, typ uint8) []byte

func FileOperation added in v1.0.0

func FileOperation(f TSSPFile, op func())

func FilesMergedTire added in v1.4.0

func FilesMergedTire(files []TSSPFile) uint64

func FillNilCol added in v1.1.0

func FillNilCol(col *record.ColVal, size int, ref *record.Field)

func FilterByField

func FilterByField(rec *record.Record, filterRec *record.Record, filterOption *BaseFilterOptions, con influxql.Expr, rowFilters *[]clv.RowFilter,
	tags *influx.PointTags, filterBitmap *bitmap.FilterBitmap, colAux **ColAux) *record.Record

func FilterByFieldFuncs added in v1.1.0

func FilterByFieldFuncs(rec, filterRec *record.Record, filterOption *BaseFilterOptions, filterBitmap *bitmap.FilterBitmap) *record.Record

func FilterByOpts added in v1.0.1

func FilterByOpts(rec *record.Record, opt *FilterOptions) *record.Record

func FilterByTime

func FilterByTime(rec *record.Record, tr util.TimeRange) *record.Record

func FilterByTimeDescend

func FilterByTimeDescend(rec *record.Record, tr util.TimeRange) *record.Record

func FlushRemoteEnabled added in v1.3.0

func FlushRemoteEnabled(tier uint64) bool

func GenFixRowsPerSegment added in v1.2.0

func GenFixRowsPerSegment(data *record.Record, rowNumPerSegment int) []int

func GenLogFileName added in v1.0.0

func GenLogFileName(logSeq *uint64) string

func GenParquetLogName added in v1.3.0

func GenParquetLogName() string

func GenRecByReserveIds added in v1.2.0

func GenRecByReserveIds(rec, filterRec *record.Record, rowNumber []int, redIdxMap map[int]struct{}) *record.Record

func GetBloomFilterBuf added in v1.2.0

func GetBloomFilterBuf() *bloomFilter

func GetChunkMetaCompressMode added in v1.2.0

func GetChunkMetaCompressMode() uint8

func GetCursorsBy added in v1.3.0

func GetCursorsBy(path *sparseindex.OBSFilterPath, tr util.TimeRange, isAscending bool) (int, uint64, error)

func GetDetachedFlushEnabled added in v1.3.0

func GetDetachedFlushEnabled() bool

func GetDir added in v1.1.0

func GetDir(engineType config.EngineType, path string) string

func GetMaxRowsPerSegment4TsStore added in v1.1.0

func GetMaxRowsPerSegment4TsStore() int

func GetMergeFlag4TsStore added in v1.1.0

func GetMergeFlag4TsStore() int32

func GetMetaIndexChunkCount added in v1.2.0

func GetMetaIndexChunkCount(obsOptions *obs.ObsOptions, dataPath string) (int64, error)

func GetMetaIndexOffsetAndLengthByChunkId added in v1.2.0

func GetMetaIndexOffsetAndLengthByChunkId(chunkId int64) (offset, length int64)

func GetPKItems added in v1.3.0

func GetPKItems(path string, obsOpts *obs.ObsOptions, miChunkIds []int64) (*colstore.DetachedPKMetaInfo, []*colstore.DetachedPKInfo, error)

func GetPKMetaOffsetLengthByChunkId added in v1.2.0

func GetPKMetaOffsetLengthByChunkId(pkMetaInfo *colstore.DetachedPKMetaInfo, chunkId int) (offset, length int64)

func GetSortKeyColVal added in v1.1.1

func GetSortKeyColVal(fi *FileIterator, sortKey []record.Field, ctx *ReadContext, tcDuration time.Duration, segPosition int, compactWithBlock bool) ([]*record.ColVal, []record.SortItem, error)

func GetTmpFileSuffix added in v1.1.0

func GetTmpFileSuffix() string

func InParquetProcess added in v1.3.0

func InParquetProcess(files ...string) bool

func Init

func Init()

func InitDecFunctions

func InitDecFunctions()

func InitQueryFileCache added in v1.1.0

func InitQueryFileCache(cap uint32, enable bool)

func InitWriterPool added in v1.1.0

func InitWriterPool(size int)

func IsChunkMetaCompressSelf added in v1.4.0

func IsChunkMetaCompressSelf() bool

func IsFlushToFinalFile added in v1.2.0

func IsFlushToFinalFile(totalSegmentCnt, flushToFinalFileLimit uint64) bool

func IsInterfaceNil

func IsInterfaceNil(value interface{}) bool

func IsTempleFile

func IsTempleFile(name string) bool

func LeftBound added in v1.2.0

func LeftBound(nums []uint32, target uint32, left int) int

func MarshalChunkMeta added in v1.4.0

func MarshalChunkMeta(ctx *ChunkMetaCodecCtx, cm *ChunkMeta, dst []byte) ([]byte, error)

func MarshalColumnMeta added in v1.4.0

func MarshalColumnMeta(ctx *ChunkMetaCodecCtx, col *ColumnMeta, dst []byte) []byte

func MarshalTimeRange added in v1.4.0

func MarshalTimeRange(ctx *ChunkMetaCodecCtx, sr []SegmentRange, dst []byte) []byte

func MergeRecovery

func MergeRecovery(path string, name string, ctx *MergeContext)

func MergeTimes added in v1.1.0

func MergeTimes(a []int64, b []int64, dst []int64) []int64

func NewCsImmTableImpl added in v1.2.0

func NewCsImmTableImpl() *csImmTableImpl

func NewFileLoadContext added in v1.4.0

func NewFileLoadContext() *fileLoadContext

func NewLastMergeTime added in v1.0.0

func NewLastMergeTime() *lastMergeTime

func NewMergePerformer added in v1.0.0

func NewMergePerformer(ur *UnorderedReader, stat *statistics.MergeStatItem) *mergePerformer

func NewObsWriter added in v1.2.0

func NewObsWriter(path, fileName string, obsOpts *obs.ObsOptions) (*obsWriter, error)

func NewObsWriterByFd added in v1.2.0

func NewObsWriterByFd(fd fileops.File, obsOpts *obs.ObsOptions) (*obsWriter, error)

func NewTSSPFileReader

func NewTSSPFileReader(name string, lockPath *string) (*tsspFileReader, error)

func NewTsImmTable added in v1.2.0

func NewTsImmTable() *tsImmTableImpl

func NonStreamingCompaction

func NonStreamingCompaction(fi FilesInfo) bool

func PreAggOnlyOneRow added in v1.2.0

func PreAggOnlyOneRow(buf []byte) bool

func ProcParquetLog added in v1.3.0

func ProcParquetLog(logDir string, lockPath *string, ctx *EventContext) error

func PutBloomFilterBuf added in v1.2.0

func PutBloomFilterBuf(key *bloomFilter)

func PutChunkMeta added in v1.2.0

func PutChunkMeta(filePath string, chunkMeta *ChunkMeta)

func PutDetachedSegmentTask added in v1.2.0

func PutDetachedSegmentTask(queryID string, meta IndexFrags)

func PutIDTimePairs

func PutIDTimePairs(pair *IdTimePairs)

func ReadPKDataAll added in v1.3.0

func ReadPKDataAll(path string, opts *obs.ObsOptions, offset, length []int64, meta []*colstore.DetachedPKMeta, info *colstore.DetachedPKMetaInfo) ([]*colstore.DetachedPKData, error)

func ReadPKMetaAll added in v1.3.0

func ReadPKMetaAll(path string, opts *obs.ObsOptions, offset, length []int64) ([]*colstore.DetachedPKMeta, error)

func ReadPKMetaInfoAll added in v1.3.0

func ReadPKMetaInfoAll(path string, opts *obs.ObsOptions) (*colstore.DetachedPKMetaInfo, error)

func ReadReliabilityLog added in v1.3.0

func ReadReliabilityLog(file string, dst interface{}) error

func RefFilesReader added in v1.4.0

func RefFilesReader(files ...TSSPFile)

func RegistryDDLRespData added in v1.4.0

func RegistryDDLRespData(ddl hybridqp.DDLType, f GenDDLRespDataFunc)

func RegistryDDLSequenceHandler added in v1.4.0

func RegistryDDLSequenceHandler(ddl hybridqp.DDLType, f GenSequenceHandlerFunc)

func ReleaseColumnBuilder

func ReleaseColumnBuilder(b PreAggBuilder)

func ReloadSpecifiedFiles added in v1.4.0

func ReloadSpecifiedFiles(m *MmsTables, mst string, tsspFiles *TSSPFiles)

func RemoveTsspSuffix added in v1.1.0

func RemoveTsspSuffix(dataPath string) string

func RenameIndexFiles added in v1.2.0

func RenameIndexFiles(fname string, indexList []string) error

func RenameTmpFiles

func RenameTmpFiles(newFiles []TSSPFile) error

func RenameTmpFilesWithPKIndex added in v1.1.0

func RenameTmpFilesWithPKIndex(newFiles []TSSPFile, ir *influxql.IndexRelation) error

func RenameTmpFullTextIdxFile added in v1.2.0

func RenameTmpFullTextIdxFile(msb *MsBuilder) error

func ResetAggregateData

func ResetAggregateData(newRec *record.Record, ops []*comm.CallOption)

func ResetQueryFileCache added in v1.4.0

func ResetQueryFileCache()

ResetQueryFileCache is used to reset the query file cache in unit tests.

func SaveReliabilityLog added in v1.3.0

func SaveReliabilityLog(data interface{}, dir string, lockFile string, nameGenerator func() string) (string, error)

func SearchChunkMetaBlock added in v1.4.0

func SearchChunkMetaBlock(data []byte, itemCount uint32, sid uint64) []byte

func SetChunkMetaCompressMode added in v1.2.0

func SetChunkMetaCompressMode(mode int)

func SetCompactLimit

func SetCompactLimit(bytesPerSec int64, burstLimit int64)

func SetCompactionEnabled added in v1.2.0

func SetCompactionEnabled(compactionEnabled bool)

func SetDetachedFlushEnabled added in v1.2.0

func SetDetachedFlushEnabled(detachFlushEnabled bool)

func SetFragmentsNumPerFlush added in v1.1.1

func SetFragmentsNumPerFlush(fragmentsNumPerFlush int)

func SetIndexCompressMode added in v1.2.0

func SetIndexCompressMode(mode int)

func SetMaxCompactor

func SetMaxCompactor(n int)

func SetMaxFullCompactor

func SetMaxFullCompactor(n int)

func SetMaxRowsPerSegment4TsStore added in v1.1.0

func SetMaxRowsPerSegment4TsStore(maxRowsPerSegmentLimit int)

func SetMaxSegmentLimit4TsStore added in v1.1.0

func SetMaxSegmentLimit4TsStore(limit int)

func SetMergeFlag4TsStore added in v1.1.0

func SetMergeFlag4TsStore(v int32)

func SetSnapshotLimit

func SetSnapshotLimit(bytesPerSec int64, burstLimit int64)

func SetSnapshotTblNum added in v1.1.0

func SetSnapshotTblNum(snapshotTblNum int)

func SnapshotLimit

func SnapshotLimit() bool

func SumFilesSize added in v1.0.0

func SumFilesSize(files []TSSPFile) int64

func TimeSorted added in v1.2.0

func TimeSorted(sortKeys []string) bool

func UnmarshalChunkMeta added in v1.4.0

func UnmarshalChunkMeta(ctx *ChunkMetaCodecCtx, cm *ChunkMeta, buf []byte) ([]byte, error)

func UnmarshalChunkMetaAdaptive added in v1.4.0

func UnmarshalChunkMetaAdaptive(ctx *ChunkMetaCodecCtx, cm *ChunkMeta, columns []string, buf []byte) ([]byte, error)

func UnmarshalChunkMetaBaseAttr added in v1.4.0

func UnmarshalChunkMetaBaseAttr(ctx *ChunkMetaCodecCtx, cm *ChunkMeta, buf []byte) ([]byte, error)

func UnmarshalChunkMetaWithColumns added in v1.4.0

func UnmarshalChunkMetaWithColumns(ctx *ChunkMetaCodecCtx, cm *ChunkMeta, columns []string, buf []byte) ([]byte, error)

func UnmarshalColumnMeta added in v1.4.0

func UnmarshalColumnMeta(ctx *ChunkMetaCodecCtx, segCount int, col *ColumnMeta, buf []byte) ([]byte, error)

func UnmarshalColumnMetaWithoutName added in v1.4.0

func UnmarshalColumnMetaWithoutName(ctx *ChunkMetaCodecCtx, segCount int, col *ColumnMeta, buf []byte) ([]byte, error)

func UnmarshalColumnName added in v1.4.0

func UnmarshalColumnName(ctx *ChunkMetaCodecCtx, buf []byte) ([]byte, string)

func UnmarshalTimeRange added in v1.4.0

func UnmarshalTimeRange(ctx *ChunkMetaCodecCtx, cm *ChunkMeta, buf []byte) ([]byte, bool)

func UnrefFiles

func UnrefFiles(files ...TSSPFile)

func UnrefFilesReader added in v1.0.0

func UnrefFilesReader(files ...TSSPFile)

func UpdateChunkMetaFunc added in v1.2.0

func UpdateChunkMetaFunc(_, _ cache.Entry) bool

func UpdateDetachedMetaDataCache added in v1.2.0

func UpdateDetachedMetaDataCache(old, new cache.Entry) bool

func UseIndexCompressWriter added in v1.4.0

func UseIndexCompressWriter() bool

func WriteIntoFile added in v1.1.1

func WriteIntoFile(msb *MsBuilder, tmp bool, withPKIndex bool, ir *influxql.IndexRelation) error

Types

type AccumulateMetaIndex added in v1.2.0

type AccumulateMetaIndex struct {
	// contains filtered or unexported fields
}

func (*AccumulateMetaIndex) GetBlockId added in v1.2.0

func (a *AccumulateMetaIndex) GetBlockId() uint64

func (*AccumulateMetaIndex) SetAccumulateMetaIndex added in v1.2.0

func (a *AccumulateMetaIndex) SetAccumulateMetaIndex(pkDataOffset uint32, blockId uint64, dataOffset, offset int64)

type BaseFilterOptions added in v1.1.0

type BaseFilterOptions struct {
	FiltersMap    influxql.FilterMapValuer
	RedIdxMap     map[int]struct{} // redundant columns, which are not required after filtering.
	FieldsIdx     []int            // field index in schema
	FilterTags    []string         // filter tag name
	CondFunctions *binaryfilterfunc.ConditionImpl
}

type BloomFilterIterator added in v1.2.0

type BloomFilterIterator struct {
	// contains filtered or unexported fields
}

func NewBloomFilterIterator added in v1.2.0

func NewBloomFilterIterator(f *FragmentIterators, oldFiles []TSSPFile, bfCols []string) (*BloomFilterIterator, error)

func (*BloomFilterIterator) AppendFileIdx added in v1.2.0

func (bfi *BloomFilterIterator) AppendFileIdx(fileIdx int)

func (*BloomFilterIterator) Write added in v1.2.0

func (bfi *BloomFilterIterator) Write(toLocal bool) error

type BooleanPreAgg

type BooleanPreAgg struct {
	// contains filtered or unexported fields
}

func NewBooleanPreAgg

func NewBooleanPreAgg() *BooleanPreAgg

type BufferReader added in v1.0.0

type BufferReader struct {
	// contains filtered or unexported fields
}

func NewBufferReader added in v1.0.0

func NewBufferReader(maxSize uint32) *BufferReader

func (*BufferReader) Read added in v1.0.0

func (br *BufferReader) Read(offset int64, size uint32) ([]byte, error)

func (*BufferReader) Reset added in v1.0.0

func (br *BufferReader) Reset(r TSSPFile)

type CSParquetManager added in v1.4.0

type CSParquetManager struct {
	// contains filtered or unexported fields
}

func NewCSParquetManager added in v1.4.0

func NewCSParquetManager() *CSParquetManager

func (*CSParquetManager) Convert added in v1.4.0

func (m *CSParquetManager) Convert(files []TSSPFile, db string, rp string, mst string)

func (*CSParquetManager) Recover added in v1.4.0

func (m *CSParquetManager) Recover()

func (*CSParquetManager) Run added in v1.4.0

func (m *CSParquetManager) Run()

func (*CSParquetManager) Stop added in v1.4.0

func (m *CSParquetManager) Stop()

func (*CSParquetManager) Wait added in v1.4.0

func (m *CSParquetManager) Wait()

type CSParquetPlan added in v1.4.0

type CSParquetPlan struct {
	Mst      string
	Id       uint64
	DstFile  string
	TSSPFile string
	// contains filtered or unexported fields
}

func (*CSParquetPlan) BeforeRun added in v1.4.0

func (p *CSParquetPlan) BeforeRun() error

func (*CSParquetPlan) IterRecord added in v1.4.0

func (p *CSParquetPlan) IterRecord(handler func(*record.Record) error) error

func (*CSParquetPlan) SetLogFile added in v1.4.0

func (p *CSParquetPlan) SetLogFile(file string)

type CSParquetTask added in v1.4.0

type CSParquetTask struct {
	scheduler.BaseTask
	// contains filtered or unexported fields
}

func (*CSParquetTask) Execute added in v1.4.0

func (t *CSParquetTask) Execute()

func (*CSParquetTask) LockFiles added in v1.4.0

func (t *CSParquetTask) LockFiles()

func (*CSParquetTask) Stop added in v1.4.0

func (t *CSParquetTask) Stop()

func (*CSParquetTask) UnLockFiles added in v1.4.0

func (t *CSParquetTask) UnLockFiles()

type ChunkDataBuilder

type ChunkDataBuilder struct {
	// contains filtered or unexported fields
}

func NewChunkDataBuilder

func NewChunkDataBuilder(maxRowsPerSegment, maxSegmentLimit int) *ChunkDataBuilder

func (*ChunkDataBuilder) EncodeTime

func (b *ChunkDataBuilder) EncodeTime(offset int64, timeSorted bool) error

type ChunkIterator

type ChunkIterator struct {
	*FileIterator
	// contains filtered or unexported fields
}

func NewChunkIterator

func NewChunkIterator(r *FileIterator) *ChunkIterator

func (*ChunkIterator) Close

func (c *ChunkIterator) Close()

func (*ChunkIterator) GetRecord added in v1.0.0

func (c *ChunkIterator) GetRecord() *record.Record

func (*ChunkIterator) GetSeriesID added in v1.0.0

func (c *ChunkIterator) GetSeriesID() uint64

func (*ChunkIterator) IncrChunkUsed added in v1.4.0

func (c *ChunkIterator) IncrChunkUsed()

func (*ChunkIterator) Next

func (c *ChunkIterator) Next() bool

func (*ChunkIterator) WithLog

func (c *ChunkIterator) WithLog(log *Log.Logger)

type ChunkIterators

type ChunkIterators struct {
	// contains filtered or unexported fields
}

func (*ChunkIterators) Close

func (c *ChunkIterators) Close()

func (*ChunkIterators) Len

func (c *ChunkIterators) Len() int

func (*ChunkIterators) Less

func (c *ChunkIterators) Less(i, j int) bool

func (*ChunkIterators) Next

func (c *ChunkIterators) Next() (uint64, *record.Record, error)

func (*ChunkIterators) Pop

func (c *ChunkIterators) Pop() interface{}

func (*ChunkIterators) Push

func (c *ChunkIterators) Push(v interface{})

func (*ChunkIterators) Swap

func (c *ChunkIterators) Swap(i, j int)

func (*ChunkIterators) WithLog

func (c *ChunkIterators) WithLog(log *Log.Logger)

type ChunkMeta

type ChunkMeta struct {
	// contains filtered or unexported fields
}

func GetChunkMeta added in v1.2.0

func GetChunkMeta(filePath string) (*ChunkMeta, bool)

func NewChunkMeta added in v1.3.0

func NewChunkMeta(sid uint64, minT, maxT int64, count int) *ChunkMeta

Used for testing.

func (*ChunkMeta) AllocColMeta added in v1.1.0

func (m *ChunkMeta) AllocColMeta(ref *record.Field) *ColumnMeta

func (*ChunkMeta) Clone added in v1.0.0

func (m *ChunkMeta) Clone() *ChunkMeta

func (*ChunkMeta) DelEmptyColMeta added in v1.1.0

func (m *ChunkMeta) DelEmptyColMeta()

func (*ChunkMeta) GetColMeta added in v1.0.0

func (m *ChunkMeta) GetColMeta() []ColumnMeta

func (*ChunkMeta) GetSid

func (m *ChunkMeta) GetSid() (sid uint64)

func (*ChunkMeta) GetTimeRangeBy added in v1.3.0

func (m *ChunkMeta) GetTimeRangeBy(index int) SegmentRange

func (*ChunkMeta) Len added in v1.0.0

func (m *ChunkMeta) Len() int

func (*ChunkMeta) Less added in v1.0.0

func (m *ChunkMeta) Less(i, j int) bool

func (*ChunkMeta) MinMaxTime

func (m *ChunkMeta) MinMaxTime() (min int64, max int64)

func (*ChunkMeta) Rows

func (m *ChunkMeta) Rows(ab PreAggBuilder) int

func (*ChunkMeta) SegmentCount added in v1.0.0

func (m *ChunkMeta) SegmentCount() int

func (*ChunkMeta) Size

func (m *ChunkMeta) Size() int

func (*ChunkMeta) Swap added in v1.0.0

func (m *ChunkMeta) Swap(i, j int)

func (*ChunkMeta) TimeMeta

func (m *ChunkMeta) TimeMeta() *ColumnMeta

func (*ChunkMeta) UnmarshalWithColumns added in v1.2.0

func (m *ChunkMeta) UnmarshalWithColumns(src []byte, columns []string) ([]byte, error)

func (*ChunkMeta) Validation added in v1.4.0

func (m *ChunkMeta) Validation()

type ChunkMetaCodecCtx added in v1.4.0

type ChunkMetaCodecCtx struct {
	// contains filtered or unexported fields
}

func GetChunkMetaCodecCtx added in v1.4.0

func GetChunkMetaCodecCtx() *ChunkMetaCodecCtx

func (*ChunkMetaCodecCtx) GetHeader added in v1.4.0

func (ctx *ChunkMetaCodecCtx) GetHeader() *ChunkMetaHeader

func (*ChunkMetaCodecCtx) GetIndex added in v1.4.0

func (ctx *ChunkMetaCodecCtx) GetIndex(val string) uint64

func (*ChunkMetaCodecCtx) GetValue added in v1.4.0

func (ctx *ChunkMetaCodecCtx) GetValue(idx int) string

func (*ChunkMetaCodecCtx) MemSize added in v1.4.0

func (ctx *ChunkMetaCodecCtx) MemSize() int

func (*ChunkMetaCodecCtx) Release added in v1.4.0

func (ctx *ChunkMetaCodecCtx) Release()

func (*ChunkMetaCodecCtx) SetTrailer added in v1.4.0

func (ctx *ChunkMetaCodecCtx) SetTrailer(trailer *Trailer)

type ChunkMetaContext added in v1.2.0

type ChunkMetaContext struct {
	// contains filtered or unexported fields
}

func NewChunkMetaContext added in v1.2.0

func NewChunkMetaContext(schema record.Schemas) *ChunkMetaContext

func (*ChunkMetaContext) CodecCtx added in v1.4.0

func (ctx *ChunkMetaContext) CodecCtx() *ChunkMetaCodecCtx

func (*ChunkMetaContext) MemSize added in v1.2.0

func (ctx *ChunkMetaContext) MemSize() int

func (*ChunkMetaContext) Release added in v1.2.0

func (ctx *ChunkMetaContext) Release()

type ChunkMetaEntry added in v1.2.0

type ChunkMetaEntry struct {
	// contains filtered or unexported fields
}

func NewChunkMetaEntry added in v1.2.0

func NewChunkMetaEntry(filePath string) *ChunkMetaEntry

func (*ChunkMetaEntry) GetKey added in v1.2.0

func (e *ChunkMetaEntry) GetKey() string

func (*ChunkMetaEntry) GetTime added in v1.2.0

func (e *ChunkMetaEntry) GetTime() time.Time

func (*ChunkMetaEntry) GetValue added in v1.2.0

func (e *ChunkMetaEntry) GetValue() interface{}

func (*ChunkMetaEntry) SetTime added in v1.2.0

func (e *ChunkMetaEntry) SetTime(time time.Time)

func (*ChunkMetaEntry) SetValue added in v1.2.0

func (e *ChunkMetaEntry) SetValue(value interface{})

func (*ChunkMetaEntry) Size added in v1.2.0

func (e *ChunkMetaEntry) Size() int64

type ChunkMetaHeader added in v1.4.0

type ChunkMetaHeader struct {
	// contains filtered or unexported fields
}

func (*ChunkMetaHeader) AppendValue added in v1.4.0

func (h *ChunkMetaHeader) AppendValue(val string)

func (*ChunkMetaHeader) CopyTo added in v1.4.0

func (h *ChunkMetaHeader) CopyTo(dst *ChunkMetaHeader)

func (*ChunkMetaHeader) GetValue added in v1.4.0

func (h *ChunkMetaHeader) GetValue(idx int) string

func (*ChunkMetaHeader) Len added in v1.4.0

func (h *ChunkMetaHeader) Len() int

func (*ChunkMetaHeader) Marshal added in v1.4.0

func (h *ChunkMetaHeader) Marshal(dst []byte) []byte

func (*ChunkMetaHeader) Reset added in v1.4.0

func (h *ChunkMetaHeader) Reset()

func (*ChunkMetaHeader) Unmarshal added in v1.4.0

func (h *ChunkMetaHeader) Unmarshal(buf []byte)

type ColAux added in v1.2.0

type ColAux struct {
	// contains filtered or unexported fields
}

func NewColAux added in v1.2.0

func NewColAux(rec *record.Record, filterOption *BaseFilterOptions) *ColAux

type ColumnBuilder

type ColumnBuilder struct {
	// contains filtered or unexported fields
}

func NewColumnBuilder

func NewColumnBuilder() *ColumnBuilder

func (*ColumnBuilder) BuildPreAgg added in v1.0.0

func (b *ColumnBuilder) BuildPreAgg()

func (*ColumnBuilder) EncodeColumn

func (b *ColumnBuilder) EncodeColumn(ref record.Field, col *record.ColVal, timeCols []record.ColVal, segRowsLimit int, dataOffset int64) ([]byte, error)

func (*ColumnBuilder) EncodeColumnBySize added in v1.2.0

func (b *ColumnBuilder) EncodeColumnBySize(ref record.Field, col *record.ColVal, timeCols []record.ColVal, rowPerSegment []int, dataOffset int64) ([]byte, error)

func (*ColumnBuilder) SetEncodeMode added in v1.2.0

func (b *ColumnBuilder) SetEncodeMode(detached bool)

type ColumnIterator added in v1.0.0

type ColumnIterator struct {
	// contains filtered or unexported fields
}

func NewColumnIterator added in v1.0.0

func NewColumnIterator(fi *FileIterator) *ColumnIterator

func (*ColumnIterator) Close added in v1.0.0

func (itr *ColumnIterator) Close()

func (*ColumnIterator) Error added in v1.0.0

func (itr *ColumnIterator) Error() error

func (*ColumnIterator) IncrChunkUsed added in v1.0.0

func (itr *ColumnIterator) IncrChunkUsed()

func (*ColumnIterator) IterCurrentChunk added in v1.3.0

func (itr *ColumnIterator) IterCurrentChunk(p ColumnIteratorPerformer) error

func (*ColumnIterator) NextChunkMeta added in v1.0.0

func (itr *ColumnIterator) NextChunkMeta() bool

func (*ColumnIterator) NextColumn added in v1.0.0

func (itr *ColumnIterator) NextColumn(colIdx int) (*record.Field, bool)

func (*ColumnIterator) PutCol added in v1.0.0

func (itr *ColumnIterator) PutCol(col *record.ColVal)

func (*ColumnIterator) Run added in v1.0.0

type ColumnIteratorPerformer added in v1.0.0

type ColumnIteratorPerformer interface {
	Handle(col *record.ColVal, times []int64, lastSeg bool) error
	HasSeries(uint64) bool
	ColumnChanged(*record.Field) error
	SeriesChanged(uint64, []int64) error
	WriteOriginal(fi *FileIterator) error
}

type ColumnMeta

type ColumnMeta struct {
	// contains filtered or unexported fields
}

func (*ColumnMeta) Clone added in v1.0.0

func (m *ColumnMeta) Clone() ColumnMeta

func (*ColumnMeta) Equal added in v1.1.0

func (m *ColumnMeta) Equal(name string, ty int) bool

func (*ColumnMeta) GetPreAgg added in v1.2.0

func (m *ColumnMeta) GetPreAgg() []byte

func (*ColumnMeta) GetSegment added in v1.2.0

func (m *ColumnMeta) GetSegment(i int) (int64, uint32)

func (*ColumnMeta) IsTime added in v1.1.0

func (m *ColumnMeta) IsTime() bool

func (*ColumnMeta) Name added in v1.1.0

func (m *ColumnMeta) Name() string

func (*ColumnMeta) RowCount

func (m *ColumnMeta) RowCount(ref *record.Field, decs *ReadContext) (int64, error)

func (*ColumnMeta) Type added in v1.2.0

func (m *ColumnMeta) Type() uint8

type ColumnReader

type ColumnReader interface {
	ReadDataBlock(offset int64, size uint32, dst *[]byte, ioPriority int) ([]byte, *readcache.CachePage, error)
	ReadMetaBlock(metaIdx int, id uint64, offset int64, size uint32, count uint32, dst *pool.Buffer, ioPriority int) ([]byte, *readcache.CachePage, error)
	UnrefCachePage(cachePage *readcache.CachePage)
}

type CompactGroup

type CompactGroup struct {
	// contains filtered or unexported fields
}

func NewCompactGroup

func NewCompactGroup(name string, toLevle uint16, count int) *CompactGroup

func (*CompactGroup) Add added in v1.3.0

func (g *CompactGroup) Add(item string)

func (*CompactGroup) Len added in v1.3.0

func (g *CompactGroup) Len() int

func (*CompactGroup) UpdateLevel added in v1.3.0

func (g *CompactGroup) UpdateLevel(lv uint16)

type CompactGroupBuilder added in v1.3.0

type CompactGroupBuilder struct {
	// contains filtered or unexported fields
}

func (*CompactGroupBuilder) AddFile added in v1.3.0

func (b *CompactGroupBuilder) AddFile(f TSSPFile) bool

func (*CompactGroupBuilder) Init added in v1.3.0

func (b *CompactGroupBuilder) Init(name string, closing *int64, size int)

func (*CompactGroupBuilder) Limited added in v1.3.0

func (b *CompactGroupBuilder) Limited() bool

func (*CompactGroupBuilder) Release added in v1.3.0

func (b *CompactGroupBuilder) Release()

func (*CompactGroupBuilder) SwitchGroup added in v1.3.0

func (b *CompactGroupBuilder) SwitchGroup()

type CompactTask added in v1.3.0

type CompactTask struct {
	scheduler.BaseTask
	// contains filtered or unexported fields
}

func NewCompactTask added in v1.3.0

func NewCompactTask(table *MmsTables, plan *CompactGroup, full bool) *CompactTask

func (*CompactTask) BeforeExecute added in v1.3.0

func (t *CompactTask) BeforeExecute() bool

func (*CompactTask) Execute added in v1.3.0

func (t *CompactTask) Execute()

func (*CompactTask) Finish added in v1.3.0

func (t *CompactTask) Finish()

func (*CompactTask) IncrFull added in v1.3.0

func (t *CompactTask) IncrFull(n int64)

type CompactedFileInfo

type CompactedFileInfo struct {
	Name    string // measurement name with version
	IsOrder bool
	OldFile []string
	NewFile []string
}

type Config

type Config struct {
	SnapshotTblNum       int
	FragmentsNumPerFlush int
	// contains filtered or unexported fields
}

func GetColStoreConfig added in v1.1.0

func GetColStoreConfig() *Config

func GetTsStoreConfig added in v1.1.0

func GetTsStoreConfig() *Config

func NewColumnStoreConfig added in v1.1.1

func NewColumnStoreConfig() *Config

func NewTsStoreConfig added in v1.1.0

func NewTsStoreConfig() *Config

func (*Config) GetCompactionEnabled added in v1.2.0

func (c *Config) GetCompactionEnabled() bool

func (*Config) GetMaxRowsPerSegment added in v1.1.0

func (c *Config) GetMaxRowsPerSegment() int

func (*Config) GetMaxSegmentLimit added in v1.1.0

func (c *Config) GetMaxSegmentLimit() int

func (*Config) SetExpectedSegmentSize added in v1.2.0

func (c *Config) SetExpectedSegmentSize(n uint32)

func (*Config) SetFilesLimit

func (c *Config) SetFilesLimit(n int64)

func (*Config) SetMaxRowsPerSegment

func (c *Config) SetMaxRowsPerSegment(maxRowsPerSegmentLimit int)

func (*Config) SetMaxSegmentLimit

func (c *Config) SetMaxSegmentLimit(n int)

type CsChunkDataImp added in v1.2.0

type CsChunkDataImp struct {
	// contains filtered or unexported fields
}

func (*CsChunkDataImp) EncodeChunk added in v1.2.0

func (c *CsChunkDataImp) EncodeChunk(b *ChunkDataBuilder, id uint64, offset int64, rec *record.Record, dst []byte, timeSorted bool) ([]byte, error)

func (*CsChunkDataImp) EncodeChunkForCompaction added in v1.2.0

func (c *CsChunkDataImp) EncodeChunkForCompaction(b *ChunkDataBuilder, offset int64, rec *record.Record, dst []byte, accumulateRowsIndex []int, timeSorted bool) ([]byte, error)

func (*CsChunkDataImp) SetAccumulateRowsIndex added in v1.2.0

func (c *CsChunkDataImp) SetAccumulateRowsIndex(accumulateRowsIndex []int)

func (*CsChunkDataImp) SetDetachedInfo added in v1.2.0

func (c *CsChunkDataImp) SetDetachedInfo(writeDetached bool)

type DDLBasePlan added in v1.4.0

type DDLBasePlan interface {
	Execute(dst map[string]DDLRespData, mstKeys map[string][][]byte, condition influxql.Expr, tr util.TimeRange, limit int) error
	Stop()
}

DDLBasePlan is an abstraction of the DDL base plan.

func NewDDLBasePlan added in v1.4.0

func NewDDLBasePlan(table TablesStore, idx IndexMergeSet, logger *Log.Logger, sh EngineShard, client metaclient.MetaClient, ddl hybridqp.DDLType) DDLBasePlan

type DDLRespData added in v1.4.0

type DDLRespData interface {
	Add(key, value string)
	ForEach(process func(key, value string))
	Count() int
}

DDLRespData is an abstraction of the DDL response data.

func NewSeriesKeys added in v1.4.0

func NewSeriesKeys() DDLRespData

func NewTagSets added in v1.3.0

func NewTagSets() DDLRespData

type DetachedChunkMetaReader added in v1.2.0

type DetachedChunkMetaReader struct {
	// contains filtered or unexported fields
}

func NewDetachedChunkMetaReader added in v1.2.0

func NewDetachedChunkMetaReader(path string, obsOpts *obs.ObsOptions) (*DetachedChunkMetaReader, error)

func (*DetachedChunkMetaReader) ReadChunkMeta added in v1.2.0

func (reader *DetachedChunkMetaReader) ReadChunkMeta(offset, length []int64) ([]*ChunkMeta, error)

type DetachedMetaDataReader added in v1.2.0

type DetachedMetaDataReader struct {
	// contains filtered or unexported fields
}

func NewDetachedMetaDataReader added in v1.2.0

func NewDetachedMetaDataReader(path string, obsOpts *obs.ObsOptions, isSort bool) (*DetachedMetaDataReader, error)

func (*DetachedMetaDataReader) Close added in v1.3.0

func (reader *DetachedMetaDataReader) Close()

func (*DetachedMetaDataReader) InitReadBatch added in v1.2.0

func (reader *DetachedMetaDataReader) InitReadBatch(s []*SegmentMeta, schema record.Schemas)

func (*DetachedMetaDataReader) ReadBatch added in v1.2.0

func (reader *DetachedMetaDataReader) ReadBatch(dst *record.Record, decs *ReadContext) (*record.Record, error)

type DetachedMetaIndexReader added in v1.2.0

type DetachedMetaIndexReader struct {
	// contains filtered or unexported fields
}

func NewDetachedMetaIndexReader added in v1.2.0

func NewDetachedMetaIndexReader(path string, obsOpts *obs.ObsOptions) (*DetachedMetaIndexReader, error)

func (*DetachedMetaIndexReader) Close added in v1.3.0

func (reader *DetachedMetaIndexReader) Close()

func (*DetachedMetaIndexReader) ReadMetaIndex added in v1.2.0

func (reader *DetachedMetaIndexReader) ReadMetaIndex(offset, length []int64) ([]*MetaIndex, error)

type DetachedPKDataReader added in v1.2.0

type DetachedPKDataReader struct {
	// contains filtered or unexported fields
}

func NewDetachedPKDataReader added in v1.2.0

func NewDetachedPKDataReader(path string, opts *obs.ObsOptions) (*DetachedPKDataReader, error)

func (*DetachedPKDataReader) Close added in v1.3.0

func (reader *DetachedPKDataReader) Close()

func (*DetachedPKDataReader) Read added in v1.2.0

func (reader *DetachedPKDataReader) Read(offset, length []int64, metas []*colstore.DetachedPKMeta, info *colstore.DetachedPKMetaInfo) ([]*colstore.DetachedPKData, error)

type DetachedPKMetaInfoReader added in v1.2.0

type DetachedPKMetaInfoReader struct {
	// contains filtered or unexported fields
}

func NewDetachedPKMetaInfoReader added in v1.2.0

func NewDetachedPKMetaInfoReader(path string, opts *obs.ObsOptions) (*DetachedPKMetaInfoReader, error)

func (*DetachedPKMetaInfoReader) Close added in v1.3.0

func (reader *DetachedPKMetaInfoReader) Close()

func (*DetachedPKMetaInfoReader) Read added in v1.2.0

type DetachedPKMetaReader added in v1.2.0

type DetachedPKMetaReader struct {
	// contains filtered or unexported fields
}

func NewDetachedPKMetaReader added in v1.2.0

func NewDetachedPKMetaReader(path string, opts *obs.ObsOptions) (*DetachedPKMetaReader, error)

func (*DetachedPKMetaReader) Close added in v1.3.0

func (reader *DetachedPKMetaReader) Close()

func (*DetachedPKMetaReader) Read added in v1.2.0

func (reader *DetachedPKMetaReader) Read(offset, length []int64) ([]*colstore.DetachedPKMeta, error)

type DetachedSegmentEntry added in v1.2.0

type DetachedSegmentEntry struct {
	// contains filtered or unexported fields
}

func NewSegmentMetaDataEntry added in v1.2.0

func NewSegmentMetaDataEntry(segmentID string) *DetachedSegmentEntry

func (*DetachedSegmentEntry) GetKey added in v1.2.0

func (e *DetachedSegmentEntry) GetKey() string

func (*DetachedSegmentEntry) GetTime added in v1.2.0

func (e *DetachedSegmentEntry) GetTime() time.Time

func (*DetachedSegmentEntry) GetValue added in v1.2.0

func (e *DetachedSegmentEntry) GetValue() interface{}

func (*DetachedSegmentEntry) SetTime added in v1.2.0

func (e *DetachedSegmentEntry) SetTime(time time.Time)

func (*DetachedSegmentEntry) SetValue added in v1.2.0

func (e *DetachedSegmentEntry) SetValue(value interface{})

func (*DetachedSegmentEntry) Size added in v1.2.0

func (e *DetachedSegmentEntry) Size() int64

type EncodeChunkData added in v1.2.0

type EncodeChunkData interface {
	EncodeChunk(b *ChunkDataBuilder, id uint64, offset int64, rec *record.Record, dst []byte, timeSorted bool) ([]byte, error)
	EncodeChunkForCompaction(b *ChunkDataBuilder, offset int64, rec *record.Record, dst []byte, accumulateRowsIndex []int, timeSorted bool) ([]byte, error)
	SetAccumulateRowsIndex(rowsPerSegment []int)
	SetDetachedInfo(writeDetached bool)
}

type EncodeColumnMode added in v1.2.0

type EncodeColumnMode interface {
	// contains filtered or unexported methods
}

type EngineShard added in v1.3.0

type EngineShard interface {
	IsOpened() bool
	OpenAndEnable(client metaclient.MetaClient) error
	GetDataPath() string
	GetIdent() *meta.ShardIdentifier
}

type Event added in v1.3.0

type Event interface {
	Instance() Event
	Init(mst string, level uint16)
	Enable() bool
	OnWriteRecord(rec *record.Record)
	OnWriteChunkMeta(cm *ChunkMeta)
	OnNewFile(f TSSPFile)
	OnReplaceFile(shardDir string, lockFile string) error
	OnInterrupt()
	OnFinish(ctx *EventContext)
}

type EventBus added in v1.3.0

type EventBus struct {
	// contains filtered or unexported fields
}

func DefaultEventBus added in v1.3.0

func DefaultEventBus() *EventBus

func (*EventBus) NewEvents added in v1.3.0

func (b *EventBus) NewEvents(typ EventType, mst string, level uint16) *Events

func (*EventBus) Register added in v1.3.0

func (b *EventBus) Register(typ EventType, e Event)

type EventContext added in v1.3.0

type EventContext struct {
	// contains filtered or unexported fields
}

func NewEventContext added in v1.3.0

func NewEventContext(idx IndexMergeSet, scheduler *scheduler.TaskScheduler, signal chan struct{}) *EventContext

type EventType added in v1.3.0

type EventType int
const (
	EventTypeMergeSelf EventType = iota
	EventTypeStreamCompact
	EventTypeEnd
)

type Events added in v1.3.0

type Events struct {
	// contains filtered or unexported fields
}

func (*Events) Finish added in v1.3.0

func (es *Events) Finish(success bool, ctx *EventContext)

func (*Events) Instance added in v1.3.0

func (es *Events) Instance() *Events

func (*Events) Register added in v1.3.0

func (es *Events) Register(e Event)

func (*Events) TriggerNewFile added in v1.3.0

func (es *Events) TriggerNewFile(f TSSPFile)

func (*Events) TriggerReplaceFile added in v1.3.0

func (es *Events) TriggerReplaceFile(shardDir, lock string) error

func (*Events) TriggerWriteChunkMeta added in v1.3.0

func (es *Events) TriggerWriteChunkMeta(cm *ChunkMeta)

func (*Events) TriggerWriteRecord added in v1.3.0

func (es *Events) TriggerWriteRecord(rec *record.Record)

type ExtraData added in v1.4.0

type ExtraData struct {
	TimeStoreFlag         uint8
	ChunkMetaCompressFlag uint8

	ChunkMetaHeader *ChunkMetaHeader
	// contains filtered or unexported fields
}

func (*ExtraData) CopyTo added in v1.4.0

func (e *ExtraData) CopyTo(dst *ExtraData)

func (*ExtraData) EnableTimeStore added in v1.4.0

func (e *ExtraData) EnableTimeStore()

func (*ExtraData) MarshalExtraData added in v1.4.0

func (e *ExtraData) MarshalExtraData(dst []byte) []byte

func (*ExtraData) Reset added in v1.4.0

func (e *ExtraData) Reset()

func (*ExtraData) SetChunkMetaCompressFlag added in v1.4.0

func (e *ExtraData) SetChunkMetaCompressFlag(v uint8)

func (*ExtraData) SetChunkMetaHeader added in v1.4.0

func (e *ExtraData) SetChunkMetaHeader(header *ChunkMetaHeader)

func (*ExtraData) UnmarshalExtraData added in v1.4.0

func (e *ExtraData) UnmarshalExtraData(src []byte) ([]byte, error)

type FileInfoExtend added in v1.3.0

type FileInfoExtend struct {
	FileInfo meta.FileInfo
	Name     string
}

type FileIterator

type FileIterator struct {
	// contains filtered or unexported fields
}

func NewFileIterator

func NewFileIterator(r TSSPFile, log *Log.Logger) *FileIterator

func (*FileIterator) Close

func (itr *FileIterator) Close()

func (*FileIterator) GetCurtChunkMeta added in v1.0.0

func (itr *FileIterator) GetCurtChunkMeta() *ChunkMeta

func (*FileIterator) NextChunkMeta

func (itr *FileIterator) NextChunkMeta() bool

func (*FileIterator) ReadData added in v1.2.0

func (itr *FileIterator) ReadData(offset int64, size uint32) ([]byte, error)

func (*FileIterator) WithLog

func (itr *FileIterator) WithLog(log *Log.Logger)

type FileIterators

type FileIterators []*FileIterator

func (FileIterators) AverageRows

func (i FileIterators) AverageRows() int

func (FileIterators) Close

func (i FileIterators) Close()

func (FileIterators) MaxChunkRows

func (i FileIterators) MaxChunkRows() int

func (FileIterators) MaxColumns

func (i FileIterators) MaxColumns() int

type FileReader added in v1.1.0

type FileReader interface {
	Open() error
	Close() error
	ReadData(cm *ChunkMeta, segment int, dst *record.Record, ctx *ReadContext, ioPriority int) (*record.Record, error)
	Ref()
	Unref() int64
	MetaIndexAt(idx int) (*MetaIndex, error)
	MetaIndex(id uint64, tr util.TimeRange) (int, *MetaIndex, error)
	ChunkMeta(id uint64, offset int64, size, itemCount uint32, metaIdx int, ctx *ChunkMetaContext, ioPriority int) (*ChunkMeta, error)

	ReadMetaBlock(metaIdx int, id uint64, offset int64, size uint32, count uint32, buf *pool.Buffer, ioPriority int) ([]byte, *readcache.CachePage, error)
	ReadDataBlock(offset int64, size uint32, dst *[]byte, ioPriority int) ([]byte, *readcache.CachePage, error)
	Read(offset int64, size uint32, dst *[]byte, ioPriority int) ([]byte, error)
	ReadChunkMetaData(metaIdx int, m *MetaIndex, dst []ChunkMeta, ioPriority int) ([]ChunkMeta, error)
	LoadIdTimes(isOrder bool, p *IdTimePairs) error

	Stat() *Trailer
	MinMaxSeriesID() (min, max uint64, err error)
	MinMaxTime() (min, max int64, err error)
	Contains(id uint64, tm util.TimeRange) bool
	ContainsTime(tm util.TimeRange) bool
	ContainsId(id uint64) bool
	Name() string
	FileName() string
	Rename(newName string) error
	RenameOnObs(obsName string, tmp bool, obsOpt *obs.ObsOptions) error
	FileSize() int64
	InMemSize() int64
	Version() uint64
	FreeMemory()
	FreeFileHandle() error
	LoadIntoMemory() error
	LoadComponents() error
	AverageChunkRows() int
	MaxChunkRows() int
	GetFileReaderRef() int64
	ChunkMetaCompressMode() uint8
}

type FileReaderContext added in v1.2.0

type FileReaderContext struct {
	// contains filtered or unexported fields
}

func NewFileReaderContext added in v1.2.0

func NewFileReaderContext(tr util.TimeRange, schemas record.Schemas, decs *ReadContext, filterOpts *FilterOptions, filterBitmap *bitmap.FilterBitmap, isOrder bool) *FileReaderContext

func (*FileReaderContext) GetSchemas added in v1.3.0

func (f *FileReaderContext) GetSchemas() record.Schemas

type FileSwapper added in v1.2.0

type FileSwapper struct {
	// contains filtered or unexported fields
}

func NewFileSwapper added in v1.2.0

func NewFileSwapper(file string, lock string, limitCompact bool, compressMode int) (*FileSwapper, error)

func (*FileSwapper) CopyTo added in v1.2.0

func (s *FileSwapper) CopyTo(to io.Writer, buf []byte) (int64, error)

func (*FileSwapper) MustClose added in v1.2.0

func (s *FileSwapper) MustClose()

func (*FileSwapper) SetWriter added in v1.2.0

func (s *FileSwapper) SetWriter(w io.WriteCloser)

func (*FileSwapper) Write added in v1.2.0

func (s *FileSwapper) Write(b []byte) (int, error)

type FilesInfo

type FilesInfo struct {
	// contains filtered or unexported fields
}

type FilterOptions

type FilterOptions struct {
	// contains filtered or unexported fields
}

func NewFilterOpts

func NewFilterOpts(cond influxql.Expr, filterOption *BaseFilterOptions, tags *influx.PointTags, rowFilters *[]clv.RowFilter) *FilterOptions

func (*FilterOptions) GetCond added in v1.1.0

func (fo *FilterOptions) GetCond() influxql.Expr

func (*FilterOptions) SetCondFuncs added in v1.1.0

func (fo *FilterOptions) SetCondFuncs(filterOption *BaseFilterOptions)

type FirstLastReader added in v1.1.0

type FirstLastReader struct {
	// contains filtered or unexported fields
}

func (*FirstLastReader) Init added in v1.1.0

func (r *FirstLastReader) Init(cm *ChunkMeta, cr ColumnReader, ref *record.Field, dst *record.Record, first bool) *FirstLastReader

func (*FirstLastReader) Read added in v1.1.0

func (r *FirstLastReader) Read(ctx *ReadContext, copied bool, ioPriority int) error

func (*FirstLastReader) ReadMaxFromPreAgg added in v1.4.0

func (r *FirstLastReader) ReadMaxFromPreAgg(colMeta *ColumnMeta) (interface{}, int64, bool)

func (*FirstLastReader) ReadMinFromPreAgg added in v1.4.0

func (r *FirstLastReader) ReadMinFromPreAgg(colMeta *ColumnMeta) (interface{}, int64, bool)

func (*FirstLastReader) Release added in v1.1.0

func (r *FirstLastReader) Release()

type FloatPreAgg

type FloatPreAgg struct {
	// contains filtered or unexported fields
}

FloatPreAgg: if you change the order of the elements in the structure, remember to modify marshal() and unmarshal() as well.

func NewFloatPreAgg

func NewFloatPreAgg() *FloatPreAgg

func (*FloatPreAgg) VLCDecode added in v1.4.0

func (m *FloatPreAgg) VLCDecode(src []byte) ([]byte, error)

func (*FloatPreAgg) VLCEncode added in v1.4.0

func (m *FloatPreAgg) VLCEncode(dst []byte) []byte

type FragmentIterator added in v1.2.0

type FragmentIterator interface {
	// contains filtered or unexported methods
}

type FragmentIterators added in v1.1.1

type FragmentIterators struct {
	SortKeyFileds []record.Field

	TableData

	Conf *Config

	PkRec             []*record.Record
	RecordResult      *record.Record
	TimeClusterResult *record.Record
	// contains filtered or unexported fields
}

func (*FragmentIterators) Close added in v1.1.1

func (f *FragmentIterators) Close()

func (*FragmentIterators) CompareWithBreakPoint added in v1.1.1

func (f *FragmentIterators) CompareWithBreakPoint(curPos, breakPos int) bool

func (*FragmentIterators) IsEmpty added in v1.2.0

func (f *FragmentIterators) IsEmpty() bool

func (*FragmentIterators) Len added in v1.1.1

func (f *FragmentIterators) Len() int

func (*FragmentIterators) Less added in v1.1.1

func (f *FragmentIterators) Less(i, j int) bool

func (*FragmentIterators) Pop added in v1.1.1

func (f *FragmentIterators) Pop() interface{}

func (*FragmentIterators) Push added in v1.1.1

func (f *FragmentIterators) Push(v interface{})

func (*FragmentIterators) Swap added in v1.1.1

func (f *FragmentIterators) Swap(i, j int)

func (*FragmentIterators) WithLog added in v1.1.1

func (f *FragmentIterators) WithLog(log *Log.Logger)

type FragmentIteratorsPool added in v1.1.1

type FragmentIteratorsPool struct {
	// contains filtered or unexported fields
}

func NewFragmentIteratorsPool added in v1.1.1

func NewFragmentIteratorsPool(n int) *FragmentIteratorsPool

type GenDDLRespDataFunc added in v1.4.0

type GenDDLRespDataFunc func() DDLRespData

GenDDLRespDataFunc is a function to generate DDLRespData.

func GetDDLRespData added in v1.4.0

func GetDDLRespData(ddl hybridqp.DDLType) GenDDLRespDataFunc

type GenSequenceHandlerFunc added in v1.4.0

type GenSequenceHandlerFunc func(idx IndexMergeSet, condition influxql.Expr, tr *util.TimeRange, limit int) SequenceIteratorHandler

GenSequenceHandlerFunc is a function to generate a SequenceIteratorHandler.

func GetDDLDDLSequenceHandler added in v1.4.0

func GetDDLDDLSequenceHandler(ddl hybridqp.DDLType) GenSequenceHandlerFunc

type HotFileManager added in v1.4.0

type HotFileManager struct {
	// contains filtered or unexported fields
}

func NewHotFileManager added in v1.4.0

func NewHotFileManager() *HotFileManager

func (*HotFileManager) Add added in v1.4.0

func (m *HotFileManager) Add(f TSSPFile)

func (*HotFileManager) AddAll added in v1.4.0

func (m *HotFileManager) AddAll(files []TSSPFile)

func (*HotFileManager) AllocLoadMemory added in v1.4.0

func (m *HotFileManager) AllocLoadMemory(size int64) bool

func (*HotFileManager) BackgroundFree added in v1.4.0

func (m *HotFileManager) BackgroundFree()

func (*HotFileManager) Free added in v1.4.0

func (m *HotFileManager) Free()

func (*HotFileManager) InHotDuration added in v1.4.0

func (m *HotFileManager) InHotDuration(f TSSPFile) bool

func (*HotFileManager) IncrMemorySize added in v1.4.0

func (m *HotFileManager) IncrMemorySize(size int64)

func (*HotFileManager) Run added in v1.4.0

func (m *HotFileManager) Run()

func (*HotFileManager) SetMaxMemorySize added in v1.4.0

func (m *HotFileManager) SetMaxMemorySize(size int64)

func (*HotFileManager) Stop added in v1.4.0

func (m *HotFileManager) Stop()

type HotFileReader added in v1.4.0

type HotFileReader struct {
	fileops.BasicFileReader
	// contains filtered or unexported fields
}

func NewHotFileReader added in v1.4.0

func NewHotFileReader(r fileops.BasicFileReader, buf []byte) *HotFileReader

func (*HotFileReader) IsMmapRead added in v1.4.0

func (r *HotFileReader) IsMmapRead() bool

func (*HotFileReader) ReadAt added in v1.4.0

func (r *HotFileReader) ReadAt(off int64, size uint32, dstPtr *[]byte, ioPriority int) ([]byte, error)

func (*HotFileReader) Release added in v1.4.0

func (r *HotFileReader) Release()

func (*HotFileReader) Size added in v1.4.0

func (r *HotFileReader) Size() (int64, error)

type HotFileWriter added in v1.4.0

type HotFileWriter struct {
	fileops.FileWriter
	// contains filtered or unexported fields
}

func NewHotFileWriter added in v1.4.0

func NewHotFileWriter(w fileops.FileWriter) *HotFileWriter

func (*HotFileWriter) AppendChunkMetaToData added in v1.4.0

func (w *HotFileWriter) AppendChunkMetaToData() error

func (*HotFileWriter) BuildHotFileReader added in v1.4.0

func (w *HotFileWriter) BuildHotFileReader(r fileops.BasicFileReader) *HotFileReader

func (*HotFileWriter) MemSize added in v1.4.0

func (w *HotFileWriter) MemSize() int

func (*HotFileWriter) Release added in v1.4.0

func (w *HotFileWriter) Release()

func (*HotFileWriter) WriteChunkMeta added in v1.4.0

func (w *HotFileWriter) WriteChunkMeta(b []byte) (int, error)

func (*HotFileWriter) WriteData added in v1.4.0

func (w *HotFileWriter) WriteData(b []byte) (int, error)

type IdTimePairs

type IdTimePairs struct {
	Name string
	Ids  []uint64
	Tms  []int64
	Rows []int64
}

func GetIDTimePairs

func GetIDTimePairs(name string) *IdTimePairs

func (*IdTimePairs) Add

func (p *IdTimePairs) Add(id uint64, tm int64)

func (*IdTimePairs) AddRowCounts

func (p *IdTimePairs) AddRowCounts(rowCounts int64)

func (*IdTimePairs) Len

func (p *IdTimePairs) Len() int

func (*IdTimePairs) Marshal

func (p *IdTimePairs) Marshal(encTimes bool, dst []byte, ctx *encoding.CoderContext) []byte

func (*IdTimePairs) Reset

func (p *IdTimePairs) Reset(name string)

func (*IdTimePairs) UnmarshalBlocks added in v1.4.0

func (p *IdTimePairs) UnmarshalBlocks(decTimes bool, src []byte, startIdx int, decoder *encoding.CoderContext) (int, int, int, error)

func (*IdTimePairs) UnmarshalHeader added in v1.4.0

func (p *IdTimePairs) UnmarshalHeader(src []byte) (uint32, error)

type ImmTable added in v1.1.0

type ImmTable interface {
	GetEngineType() config.EngineType
	GetCompactionType(name string) config.CompactionType

	NewFileIterators(m *MmsTables, group *CompactGroup) (FilesInfo, error)
	AddTSSPFiles(m *MmsTables, name string, isOrder bool, files ...TSSPFile)
	AddBothTSSPFiles(flushed *bool, m *MmsTables, name string, orderFiles []TSSPFile, unorderFiles []TSSPFile)
	LevelPlan(m *MmsTables, level uint16) []*CompactGroup
	SetMstInfo(name string, mstInfo *meta.MeasurementInfo)
	GetMstInfo(name string) (*meta.MeasurementInfo, bool)
	UpdateAccumulateMetaIndexInfo(name string, index *AccumulateMetaIndex)
	FullyCompacted(m *MmsTables) bool
	// contains filtered or unexported methods
}

type IndexCompressWriter added in v1.2.0

type IndexCompressWriter struct {
	// contains filtered or unexported fields
}

func (*IndexCompressWriter) BlockSize added in v1.2.0

func (w *IndexCompressWriter) BlockSize() int

func (*IndexCompressWriter) Close added in v1.2.0

func (w *IndexCompressWriter) Close() error

func (*IndexCompressWriter) CopyTo added in v1.2.0

func (w *IndexCompressWriter) CopyTo(to io.Writer) (int, error)

func (*IndexCompressWriter) GetWriter added in v1.2.0

func (w *IndexCompressWriter) GetWriter() *bufio.Writer

func (*IndexCompressWriter) Init added in v1.2.0

func (w *IndexCompressWriter) Init(name string, lock *string, cacheMeta bool, limitCompact bool)

func (*IndexCompressWriter) MetaDataBlocks added in v1.2.0

func (w *IndexCompressWriter) MetaDataBlocks(dst [][]byte) [][]byte

func (*IndexCompressWriter) Size added in v1.2.0

func (w *IndexCompressWriter) Size() int

func (*IndexCompressWriter) SwitchMetaBuffer added in v1.2.0

func (w *IndexCompressWriter) SwitchMetaBuffer() (int, error)

func (*IndexCompressWriter) Write added in v1.2.0

func (w *IndexCompressWriter) Write(p []byte) (int, error)

type IndexFrags added in v1.2.0

type IndexFrags interface {
	BasePath() string
	FragCount() int64
	IndexCount() int
	Indexes() interface{}
	AppendIndexes(...interface{})
	FragRanges() []fragment.FragmentRanges
	AppendFragRanges(...fragment.FragmentRanges)
	AddFragCount(int64)
	SetErr(error)
	GetErr() error
	Size() int
}

func GetDetachedSegmentTask added in v1.2.0

func GetDetachedSegmentTask(queryID string) (IndexFrags, bool)

type IndexMergeSet added in v1.3.0

type IndexMergeSet interface {
	GetSeries(sid uint64, buf []byte, condition influxql.Expr, callback func(key *influx.SeriesKey)) error
	GetSeriesBytes(sid uint64, buf []byte, condition influxql.Expr, callback func(key *influx.SeriesBytes)) error
	SearchSeriesKeys(series [][]byte, name []byte, condition influxql.Expr) ([][]byte, error)
}

type IndexWriter added in v1.2.0

type IndexWriter interface {
	Init(name string, lock *string, cacheMeta bool, limitCompact bool)
	Write(p []byte) (int, error)
	Size() int
	BlockSize() int
	CopyTo(to io.Writer) (int, error)
	SwitchMetaBuffer() (int, error)
	MetaDataBlocks(dst [][]byte) [][]byte

	Close() error
	// contains filtered or unexported methods
}

func NewPKIndexWriter added in v1.2.0

func NewPKIndexWriter(indexName string, cacheMeta bool, limitCompact bool, lockPath *string) IndexWriter

type IntegerPreAgg

type IntegerPreAgg struct {
	// contains filtered or unexported fields
}

IntegerPreAgg: if you change the order of the elements in the structure, remember to modify marshal() and unmarshal() as well.

func NewIntegerPreAgg

func NewIntegerPreAgg() *IntegerPreAgg

func (*IntegerPreAgg) VLCDecode added in v1.4.0

func (m *IntegerPreAgg) VLCDecode(src []byte) ([]byte, error)

func (*IntegerPreAgg) VLCEncode added in v1.4.0

func (m *IntegerPreAgg) VLCEncode(dst []byte) []byte

type IteratorByBlock added in v1.2.0

type IteratorByBlock struct {
	// contains filtered or unexported fields
}

IteratorByBlock is an iterator used for a single measurement (mst).

func NewIteratorByBlock added in v1.2.0

func NewIteratorByBlock(f *FragmentIterators, conf *Config, group FilesInfo, accumulateMetaIndex *AccumulateMetaIndex) *IteratorByBlock

func (*IteratorByBlock) Flush added in v1.2.0

func (ib *IteratorByBlock) Flush(pkSchema record.Schemas, readFinal bool, tbStore *MmsTables) error

func (*IteratorByBlock) WriteDetachedMeta added in v1.2.0

func (ib *IteratorByBlock) WriteDetachedMeta(pkSchema record.Schemas) error

type IteratorByRow added in v1.2.0

type IteratorByRow struct {
	// contains filtered or unexported fields
}

func NewIteratorByRow added in v1.2.0

func NewIteratorByRow(f *FragmentIterators, conf *Config) *IteratorByRow

func (*IteratorByRow) Flush added in v1.2.0

func (ir *IteratorByRow) Flush(tbStore *MmsTables, pkSchema record.Schemas, final bool) error

func (*IteratorByRow) GetBreakPoint added in v1.2.0

func (ir *IteratorByRow) GetBreakPoint()

func (*IteratorByRow) NextWithBreakPoint added in v1.2.0

func (ir *IteratorByRow) NextWithBreakPoint()

type Location

type Location struct {
	// contains filtered or unexported fields
}

func NewLocation

func NewLocation(r TSSPFile, ctx *ReadContext) *Location

func (*Location) AscendingDone added in v1.1.0

func (l *Location) AscendingDone()

func (*Location) Contains

func (l *Location) Contains(sid uint64, tr util.TimeRange, ctx *ChunkMetaContext) (bool, error)

func (*Location) DescendingDone added in v1.1.0

func (l *Location) DescendingDone()

func (*Location) GetChunkMeta

func (l *Location) GetChunkMeta() *ChunkMeta

func (*Location) ReadData

func (l *Location) ReadData(filterOpts *FilterOptions, dst *record.Record, filterDst *record.Record) (*record.Record, error)

func (*Location) ResetMeta

func (l *Location) ResetMeta()

func (*Location) SetChunkMeta added in v1.2.0

func (l *Location) SetChunkMeta(chunkMeta *ChunkMeta)

func (*Location) SetClosedSignal added in v1.3.0

func (d *Location) SetClosedSignal(s *bool)

func (*Location) SetFragmentRanges added in v1.1.0

func (l *Location) SetFragmentRanges(frs []*fragment.FragmentRange)

type LocationCursor

type LocationCursor struct {
	// contains filtered or unexported fields
}

func NewLocationCursor

func NewLocationCursor(n int) *LocationCursor

func (*LocationCursor) AddFilterRecPool added in v1.1.0

func (l *LocationCursor) AddFilterRecPool(pool *record.CircularRecordPool)

func (*LocationCursor) AddLocation

func (l *LocationCursor) AddLocation(loc *Location)

func (*LocationCursor) Close added in v1.1.0

func (l *LocationCursor) Close()

func (*LocationCursor) FragmentCount added in v1.1.0

func (l *LocationCursor) FragmentCount() int

func (*LocationCursor) Len

func (l *LocationCursor) Len() int

func (*LocationCursor) Less

func (l *LocationCursor) Less(i, j int) bool

func (*LocationCursor) ReadData

func (l *LocationCursor) ReadData(filterOpts *FilterOptions, dst *record.Record, filterBitmap *bitmap.FilterBitmap,
	unnestOperator UnnestOperator) (*record.Record, error)

func (*LocationCursor) ReadMeta

func (l *LocationCursor) ReadMeta(filterOpts *FilterOptions, dst *record.Record, filterBitmap *bitmap.FilterBitmap) (*record.Record, error)

func (*LocationCursor) ReadOutOfOrderMeta

func (l *LocationCursor) ReadOutOfOrderMeta(filterOpts *FilterOptions, dst *record.Record) (*record.Record, error)

func (*LocationCursor) Reset added in v1.3.0

func (l *LocationCursor) Reset()

func (*LocationCursor) Reverse

func (l *LocationCursor) Reverse()

func (*LocationCursor) RowCount added in v1.1.0

func (l *LocationCursor) RowCount() int

func (*LocationCursor) Swap

func (l *LocationCursor) Swap(i, j int)

type MatchAllOperator added in v1.3.0

type MatchAllOperator struct {
	// contains filtered or unexported fields
}

func (*MatchAllOperator) Compute added in v1.3.0

func (r *MatchAllOperator) Compute(rec *record.Record)

type MeasurementInProcess added in v1.2.0

type MeasurementInProcess struct {
	// contains filtered or unexported fields
}

func NewMeasurementInProcess added in v1.2.0

func NewMeasurementInProcess() *MeasurementInProcess

func (*MeasurementInProcess) Add added in v1.2.0

func (m *MeasurementInProcess) Add(name string) bool

func (*MeasurementInProcess) Del added in v1.2.0

func (m *MeasurementInProcess) Del(name string)

func (*MeasurementInProcess) Has added in v1.3.0

func (m *MeasurementInProcess) Has(name string) bool

type MergeColPool added in v1.1.0

type MergeColPool struct {
	// contains filtered or unexported fields
}

func (*MergeColPool) Get added in v1.1.0

func (p *MergeColPool) Get() *record.ColVal

func (*MergeColPool) Put added in v1.1.0

func (p *MergeColPool) Put(col *record.ColVal)

type MergeContext added in v1.3.0

type MergeContext struct {
	// contains filtered or unexported fields
}

func BuildMergeContext added in v1.3.0

func BuildMergeContext(mst string, files *TSSPFiles, full bool, lmt *lastMergeTime) []*MergeContext

func NewMergeContext added in v1.0.0

func NewMergeContext(mst string, level uint16, mergeSelf bool) *MergeContext

func (*MergeContext) AddUnordered added in v1.3.0

func (ctx *MergeContext) AddUnordered(f TSSPFile)

func (*MergeContext) Limited added in v1.3.0

func (ctx *MergeContext) Limited() bool

func (*MergeContext) MergeSelf added in v1.3.0

func (ctx *MergeContext) MergeSelf() bool

func (*MergeContext) MergeSelfFast added in v1.3.0

func (ctx *MergeContext) MergeSelfFast() bool

func (*MergeContext) Release added in v1.3.0

func (ctx *MergeContext) Release()

func (*MergeContext) Sort added in v1.3.0

func (ctx *MergeContext) Sort()

func (*MergeContext) ToLevel added in v1.3.0

func (ctx *MergeContext) ToLevel() uint16

func (*MergeContext) UnorderedLen added in v1.3.0

func (ctx *MergeContext) UnorderedLen() int

func (*MergeContext) UpdateLevel added in v1.3.0

func (ctx *MergeContext) UpdateLevel(l uint16)

type MergePerformers added in v1.3.0

type MergePerformers struct {
	// contains filtered or unexported fields
}

func NewMergePerformers added in v1.3.0

func NewMergePerformers(ur *UnorderedReader) *MergePerformers

func (*MergePerformers) Close added in v1.3.0

func (c *MergePerformers) Close()

func (*MergePerformers) Closed added in v1.3.0

func (c *MergePerformers) Closed() bool

func (*MergePerformers) Len added in v1.3.0

func (c *MergePerformers) Len() int

func (*MergePerformers) Less added in v1.3.0

func (c *MergePerformers) Less(i, j int) bool

func (*MergePerformers) Next added in v1.3.0

func (c *MergePerformers) Next() error

func (*MergePerformers) Pop added in v1.3.0

func (c *MergePerformers) Pop() interface{}

func (*MergePerformers) Push added in v1.3.0

func (c *MergePerformers) Push(v interface{})

func (*MergePerformers) Release added in v1.3.0

func (c *MergePerformers) Release()

func (*MergePerformers) Swap added in v1.3.0

func (c *MergePerformers) Swap(i, j int)

type MergeSelf added in v1.3.0

type MergeSelf struct {
	// contains filtered or unexported fields
}

func NewMergeSelf added in v1.3.0

func NewMergeSelf(mts *MmsTables, lg *logger.Logger) *MergeSelf

func (*MergeSelf) InitEvents added in v1.3.0

func (m *MergeSelf) InitEvents(ctx *MergeContext) *Events

func (*MergeSelf) Merge added in v1.3.0

func (m *MergeSelf) Merge(mst string, toLevel uint16, files []TSSPFile) (TSSPFile, error)

func (*MergeSelf) Stop added in v1.3.0

func (m *MergeSelf) Stop()

type MergeSelfParquetEvent added in v1.3.0

type MergeSelfParquetEvent struct {
	TSSP2ParquetEvent
}

func (*MergeSelfParquetEvent) Instance added in v1.3.0

func (e *MergeSelfParquetEvent) Instance() Event

func (*MergeSelfParquetEvent) OnWriteRecord added in v1.3.0

func (e *MergeSelfParquetEvent) OnWriteRecord(rec *record.Record)

type MetaControl added in v1.3.0

type MetaControl interface {
	Push(MetaDataInfo)
	Pop() (MetaDataInfo, bool)
	IsEmpty() bool
}

func NewMetaControl added in v1.3.0

func NewMetaControl(isQueue bool, count int) MetaControl

type MetaData added in v1.3.0

type MetaData struct {
	// contains filtered or unexported fields
}

func NewMetaData added in v1.3.0

func NewMetaData(blockIndex int64, data []byte, offset int32) (*MetaData, error)

func (*MetaData) GetBlockIndex added in v1.3.0

func (m *MetaData) GetBlockIndex() int64

func (*MetaData) GetContentBlockLength added in v1.3.0

func (m *MetaData) GetContentBlockLength() int32

func (*MetaData) GetContentBlockOffset added in v1.3.0

func (m *MetaData) GetContentBlockOffset() int64

func (*MetaData) GetMaxTime added in v1.3.0

func (m *MetaData) GetMaxTime() int64

func (*MetaData) GetMinTime added in v1.3.0

func (m *MetaData) GetMinTime() int64

type MetaDataInfo added in v1.3.0

type MetaDataInfo interface {
	GetMinTime() int64
	GetMaxTime() int64
}

type MetaDatas added in v1.3.0

type MetaDatas []*MetaData

func (MetaDatas) Len added in v1.3.0

func (a MetaDatas) Len() int

func (MetaDatas) Less added in v1.3.0

func (a MetaDatas) Less(i, j int) bool

func (MetaDatas) Swap added in v1.3.0

func (a MetaDatas) Swap(i, j int)

type MetaIndex

type MetaIndex struct {
	// contains filtered or unexported fields
}

MetaIndex If you change the order of the elements in the structure, remember to modify marshal() and unmarshal() as well.

func GetMetaIndexAndBlockId added in v1.3.0

func GetMetaIndexAndBlockId(path string, obsOpts *obs.ObsOptions, chunkCount int64, tr util.TimeRange) ([]int64, []*MetaIndex, error)

func (*MetaIndex) GetCount added in v1.4.0

func (m *MetaIndex) GetCount() uint32

func (*MetaIndex) GetID added in v1.3.0

func (m *MetaIndex) GetID() uint64

func (*MetaIndex) GetOffset added in v1.3.0

func (m *MetaIndex) GetOffset() int64

func (*MetaIndex) GetSize added in v1.3.0

func (m *MetaIndex) GetSize() uint32

func (*MetaIndex) IsExist added in v1.2.0

func (m *MetaIndex) IsExist(tr util.TimeRange) bool

type MetaQueue added in v1.3.0

type MetaQueue []MetaDataInfo

func (*MetaQueue) IsEmpty added in v1.3.0

func (q *MetaQueue) IsEmpty() bool

func (*MetaQueue) Pop added in v1.3.0

func (q *MetaQueue) Pop() (MetaDataInfo, bool)

func (*MetaQueue) Push added in v1.3.0

func (q *MetaQueue) Push(v MetaDataInfo)

type MetaStack added in v1.3.0

type MetaStack []MetaDataInfo

func (*MetaStack) IsEmpty added in v1.3.0

func (s *MetaStack) IsEmpty() bool

func (*MetaStack) Pop added in v1.3.0

func (s *MetaStack) Pop() (MetaDataInfo, bool)

func (*MetaStack) Push added in v1.3.0

func (s *MetaStack) Push(value MetaDataInfo)

type MmsIdTime

type MmsIdTime struct {
	// contains filtered or unexported fields
}

func NewMmsIdTime

func NewMmsIdTime(sc *SeriesCounter) *MmsIdTime

func (*MmsIdTime) Get added in v1.3.0

func (m *MmsIdTime) Get(id uint64) (int64, int64)

type MmsReaders

type MmsReaders struct {
	Orders      TableReaders
	OutOfOrders TableReaders
}

type MmsTables

type MmsTables struct {
	Order      map[string]*TSSPFiles        // {"cpu_0001": *TSSPFiles}
	OutOfOrder map[string]*TSSPFiles        // {"cpu_0001": *TSSPFiles}
	CSFiles    map[string]*TSSPFiles        // {"cpu_0001": *TSSPFiles} tsspFiles for columnStore
	PKFiles    map[string]*colstore.PKFiles // {"cpu_0001": *PKFiles} PKFiles for columnStore

	ImmTable ImmTable

	Conf *Config
	// contains filtered or unexported fields
}

func NewTableStore

func NewTableStore(dir string, lock *string, tier *uint64, compactRecovery bool, config *Config) *MmsTables

func (*MmsTables) AddBothTSSPFiles added in v1.2.0

func (m *MmsTables) AddBothTSSPFiles(flushed *bool, name string, orderFiles []TSSPFile, unorderFiles []TSSPFile)

func (*MmsTables) AddPKFile added in v1.1.0

func (m *MmsTables) AddPKFile(mstName string, file string, rec *record.Record, mark fragment.IndexFragment, tcLocation int8)

func (*MmsTables) AddRowCountsBySid

func (m *MmsTables) AddRowCountsBySid(measurement string, sid uint64, rowCounts int64)

func (*MmsTables) AddTSSPFiles

func (m *MmsTables) AddTSSPFiles(name string, isOrder bool, files ...TSSPFile)

currently not used for the tsEngine

func (*MmsTables) AddTable

func (m *MmsTables) AddTable(mb *MsBuilder, isOrder bool, tmp bool)

func (*MmsTables) Close

func (m *MmsTables) Close() error

func (*MmsTables) CompactDone

func (m *MmsTables) CompactDone(files []string)

func (*MmsTables) CompactionDisable

func (m *MmsTables) CompactionDisable()

func (*MmsTables) CompactionEnable

func (m *MmsTables) CompactionEnable()

func (*MmsTables) CompactionEnabled

func (m *MmsTables) CompactionEnabled() bool

func (*MmsTables) CopyCSFiles added in v1.3.0

func (m *MmsTables) CopyCSFiles(name string) []TSSPFile

func (*MmsTables) DisableCompAndMerge added in v1.0.0

func (m *MmsTables) DisableCompAndMerge()

func (*MmsTables) DropMeasurement

func (m *MmsTables) DropMeasurement(_ context.Context, name string) error

func (*MmsTables) EnableCompAndMerge added in v1.0.0

func (m *MmsTables) EnableCompAndMerge()

func (*MmsTables) File

func (m *MmsTables) File(mstName string, fileName string, isOrder bool) TSSPFile

func (*MmsTables) FreeAllMemReader

func (m *MmsTables) FreeAllMemReader()

func (*MmsTables) FreeSequencer added in v1.0.0

func (m *MmsTables) FreeSequencer() bool

func (*MmsTables) FullCompact

func (m *MmsTables) FullCompact(shid uint64) error

func (*MmsTables) FullyCompacted added in v1.2.0

func (m *MmsTables) FullyCompacted() bool

func (*MmsTables) GetAllMstList added in v1.3.1

func (m *MmsTables) GetAllMstList() []string

func (*MmsTables) GetBothFilesRef added in v1.0.0

func (m *MmsTables) GetBothFilesRef(measurement string, hasTimeFilter bool, tr util.TimeRange, flushed *bool) ([]TSSPFile, []TSSPFile, bool)

func (*MmsTables) GetCSFiles added in v1.1.0

func (m *MmsTables) GetCSFiles(name string) (files *TSSPFiles, ok bool)

func (*MmsTables) GetFileSeq added in v1.0.0

func (m *MmsTables) GetFileSeq() uint64

func (*MmsTables) GetMstFileStat

func (m *MmsTables) GetMstFileStat() *statistics.FileStat

func (*MmsTables) GetMstInfo added in v1.2.0

func (m *MmsTables) GetMstInfo(name string) (*meta.MeasurementInfo, bool)

func (*MmsTables) GetObsOption added in v1.3.0

func (m *MmsTables) GetObsOption() *obs.ObsOptions

func (*MmsTables) GetOutOfOrderFileNum

func (m *MmsTables) GetOutOfOrderFileNum() int

func (*MmsTables) GetPKFile added in v1.1.0

func (m *MmsTables) GetPKFile(mstName string, file string) (pkInfo *colstore.PKInfo, ok bool)

func (*MmsTables) GetRowCountsBySid

func (m *MmsTables) GetRowCountsBySid(measurement string, sid uint64) (int64, error)

func (*MmsTables) GetShardID added in v1.3.0

func (m *MmsTables) GetShardID() uint64

func (*MmsTables) GetTSSPFiles added in v1.0.0

func (m *MmsTables) GetTSSPFiles(name string, isOrder bool) (files *TSSPFiles, ok bool)

func (*MmsTables) GetTableFileNum added in v1.3.0

func (m *MmsTables) GetTableFileNum(name string, order bool) int

func (*MmsTables) IsOutOfOrderFilesExist added in v1.0.0

func (m *MmsTables) IsOutOfOrderFilesExist() bool

func (*MmsTables) LevelCompact

func (m *MmsTables) LevelCompact(level uint16, shid uint64) error

func (*MmsTables) Listen added in v1.0.0

func (m *MmsTables) Listen(signal chan struct{}, onClose func())

func (*MmsTables) LoadSequencer added in v1.3.0

func (m *MmsTables) LoadSequencer()

func (*MmsTables) MergeDisable

func (m *MmsTables) MergeDisable()

func (*MmsTables) MergeEnable

func (m *MmsTables) MergeEnable()

func (*MmsTables) MergeEnabled

func (m *MmsTables) MergeEnabled() bool

func (*MmsTables) MergeOutOfOrder

func (m *MmsTables) MergeOutOfOrder(shId uint64, full bool, force bool) error

func (*MmsTables) NewChunkIterators

func (m *MmsTables) NewChunkIterators(group FilesInfo) *ChunkIterators

func (*MmsTables) NewStreamIterators

func (m *MmsTables) NewStreamIterators(group FilesInfo) *StreamIterators

func (*MmsTables) NewStreamWriteFile added in v1.0.0

func (m *MmsTables) NewStreamWriteFile(mst string) *StreamWriteFile

func (*MmsTables) NextSequence

func (m *MmsTables) NextSequence() uint64

func (*MmsTables) Open

func (m *MmsTables) Open() (int64, error)

func (*MmsTables) ReloadSequencer added in v1.1.0

func (m *MmsTables) ReloadSequencer(seq *Sequencer, async bool)

func (*MmsTables) RenameFileToLevel added in v1.3.0

func (m *MmsTables) RenameFileToLevel(plan *CompactGroup) error

func (*MmsTables) ReplaceDownSampleFiles added in v1.0.0

func (m *MmsTables) ReplaceDownSampleFiles(mstNames []string, originFiles [][]TSSPFile, newFiles [][]TSSPFile, isOrder bool, callBack func()) (err error)

func (*MmsTables) ReplaceFiles

func (m *MmsTables) ReplaceFiles(name string, oldFiles, newFiles []TSSPFile, isOrder bool) (err error)

func (*MmsTables) ReplacePKFile added in v1.1.1

func (m *MmsTables) ReplacePKFile(mstName string, file string, rec *record.Record, mark fragment.IndexFragment, oldIndexFiles []string) error

func (*MmsTables) Sequencer

func (m *MmsTables) Sequencer() *Sequencer

func (*MmsTables) SeriesTotal added in v1.1.0

func (m *MmsTables) SeriesTotal() uint64

func (*MmsTables) SetAccumulateMetaIndex added in v1.2.0

func (m *MmsTables) SetAccumulateMetaIndex(name string, aMetaIndex *AccumulateMetaIndex)

func (*MmsTables) SetAddFunc added in v1.0.0

func (m *MmsTables) SetAddFunc(addFunc func(int64))

func (*MmsTables) SetImmTableType added in v1.1.0

func (m *MmsTables) SetImmTableType(engineType config.EngineType)

func (*MmsTables) SetIndexMergeSet added in v1.3.0

func (m *MmsTables) SetIndexMergeSet(idx IndexMergeSet)

func (*MmsTables) SetLockPath added in v1.2.0

func (m *MmsTables) SetLockPath(lock *string)

func (*MmsTables) SetMstInfo added in v1.1.1

func (m *MmsTables) SetMstInfo(name string, mstInfo *meta.MeasurementInfo)

func (*MmsTables) SetObsOption added in v1.3.0

func (m *MmsTables) SetObsOption(option *obs.ObsOptions)

func (*MmsTables) SetOpId added in v1.0.0

func (m *MmsTables) SetOpId(shardId uint64, opId uint64)

func (*MmsTables) SetTier added in v1.2.0

func (m *MmsTables) SetTier(tier uint64)

func (*MmsTables) Tier

func (m *MmsTables) Tier() uint64

func (*MmsTables) Wait

func (m *MmsTables) Wait()

type MsBuilder

type MsBuilder struct {
	Path string
	TableData
	Conf *Config

	MaxIds int

	RowCount int64

	ShardID uint64

	Files     []TSSPFile
	FilesInfo []FileInfoExtend
	FileName  TSSPFileName

	EncodeChunkDataImp EncodeChunkData
	// contains filtered or unexported fields
}

func NewDetachedMsBuilder added in v1.2.0

func NewDetachedMsBuilder(dir, name string, lockPath *string, conf *Config, idCount int, fileName TSSPFileName,
	tier uint64, sequencer *Sequencer, estimateSize int, engineType config.EngineType, obsOpt *obs.ObsOptions, bfCols []string, fullTextIdx bool) (*MsBuilder, error)

func NewMsBuilder added in v1.1.0

func NewMsBuilder(dir, name string, lockPath *string, conf *Config, idCount int, fileName TSSPFileName, tier uint64,
	sequencer *Sequencer, estimateSize int, engineType config.EngineType, obsOpt *obs.ObsOptions, shardID uint64) *MsBuilder

func (*MsBuilder) BloomFilterNeedDetached added in v1.2.0

func (b *MsBuilder) BloomFilterNeedDetached(filterDetachedWriteTimes int) bool

func (*MsBuilder) CloseIndexWriters added in v1.3.0

func (b *MsBuilder) CloseIndexWriters() error

func (*MsBuilder) FileVersion

func (b *MsBuilder) FileVersion() uint64

func (*MsBuilder) Flush

func (b *MsBuilder) Flush() error

func (*MsBuilder) GetChunkBuilder added in v1.2.0

func (b *MsBuilder) GetChunkBuilder() *ChunkDataBuilder

func (*MsBuilder) GetFullTextIdx added in v1.2.0

func (b *MsBuilder) GetFullTextIdx() bool

func (*MsBuilder) GetIndexBuilder added in v1.3.0

func (b *MsBuilder) GetIndexBuilder() *index.IndexWriterBuilder

func (*MsBuilder) GetLocalBfCount added in v1.2.0

func (b *MsBuilder) GetLocalBfCount() int64

func (*MsBuilder) GetPKInfoNum added in v1.1.0

func (b *MsBuilder) GetPKInfoNum() int

func (*MsBuilder) GetPKMark added in v1.1.0

func (b *MsBuilder) GetPKMark(i int) fragment.IndexFragment

func (*MsBuilder) GetPKRecord added in v1.1.0

func (b *MsBuilder) GetPKRecord(i int) *record.Record

func (*MsBuilder) MaxRowsPerSegment

func (b *MsBuilder) MaxRowsPerSegment() int

func (*MsBuilder) Name

func (b *MsBuilder) Name() string

func (*MsBuilder) NewIndexWriterBuilder added in v1.3.0

func (b *MsBuilder) NewIndexWriterBuilder(schema record.Schemas, indexRelation influxql.IndexRelation)

func (*MsBuilder) NewPKIndexWriter added in v1.1.0

func (b *MsBuilder) NewPKIndexWriter()

func (*MsBuilder) NewTSSPFile

func (b *MsBuilder) NewTSSPFile(tmp bool) (TSSPFile, error)

func (*MsBuilder) Reset

func (b *MsBuilder) Reset()

func (*MsBuilder) SetEncodeChunkDataImp added in v1.2.0

func (b *MsBuilder) SetEncodeChunkDataImp(engineType config.EngineType)

func (*MsBuilder) SetFullTextIdx added in v1.2.0

func (b *MsBuilder) SetFullTextIdx(fullTextIdx bool)

func (*MsBuilder) SetLocalBfCount added in v1.2.0

func (b *MsBuilder) SetLocalBfCount(count int64)

func (*MsBuilder) SetTCLocation added in v1.1.1

func (b *MsBuilder) SetTCLocation(tcLocation int8)

func (*MsBuilder) SetTimeSorted added in v1.2.0

func (b *MsBuilder) SetTimeSorted(timeSorted bool)

func (*MsBuilder) SetToHot added in v1.4.0

func (b *MsBuilder) SetToHot()

func (*MsBuilder) Size

func (b *MsBuilder) Size() int64

func (*MsBuilder) StoreTimes added in v1.1.0

func (b *MsBuilder) StoreTimes()

func (*MsBuilder) SwitchChunkMeta added in v1.2.0

func (b *MsBuilder) SwitchChunkMeta() error

func (*MsBuilder) WithLog

func (b *MsBuilder) WithLog(log *logger.Logger)

func (*MsBuilder) WriteChunkMeta added in v1.2.0

func (b *MsBuilder) WriteChunkMeta(cm *ChunkMeta) (int, error)

func (*MsBuilder) WriteData

func (b *MsBuilder) WriteData(id uint64, data *record.Record) error

func (*MsBuilder) WriteDetached added in v1.2.0

func (b *MsBuilder) WriteDetached(id uint64, data *record.Record, pkSchema record.Schemas, firstFlush bool,
	accumulateMetaIndex *AccumulateMetaIndex) error

func (*MsBuilder) WriteDetachedIndex added in v1.3.0

func (b *MsBuilder) WriteDetachedIndex(writeRec *record.Record, rowsPerSegment []int) error

func (*MsBuilder) WriteDetachedMetaAndIndex added in v1.2.0

func (b *MsBuilder) WriteDetachedMetaAndIndex(writeRec *record.Record, pkSchema record.Schemas, firstFlush bool,
	accumulateMetaIndex *AccumulateMetaIndex, rowsPerSegment []int, fixRowsPerSegment int) error

func (*MsBuilder) WriteRecord

func (b *MsBuilder) WriteRecord(id uint64, data *record.Record, nextFile func(fn TSSPFileName) (seq uint64, lv uint16, merge uint16, ext uint16)) (*MsBuilder, error)

func (*MsBuilder) WriteRecordByCol added in v1.2.0

func (b *MsBuilder) WriteRecordByCol(id uint64, data *record.Record, schema record.Schemas, skipIndexRelation *influxql.IndexRelation,
	nextFile func(fn TSSPFileName) (seq uint64, lv uint16, merge uint16, ext uint16)) (*MsBuilder, error)

type PageCacheReader added in v1.2.0

type PageCacheReader struct {
	// contains filtered or unexported fields
}

func NewPageCacheReader added in v1.2.0

func NewPageCacheReader(t *Trailer, r *tsspFileReader) *PageCacheReader

func (*PageCacheReader) GetCachePageIdsAndOffsets added in v1.2.0

func (pcr *PageCacheReader) GetCachePageIdsAndOffsets(start int64, size uint32) ([]int64, []int64, error)

get all cache pageIds containing the bytes from start to start + size

func (*PageCacheReader) GetMaxPageIdAndOffset added in v1.2.0

func (pcr *PageCacheReader) GetMaxPageIdAndOffset() (int64, int64)

func (*PageCacheReader) Init added in v1.2.0

func (pcr *PageCacheReader) Init()

func (*PageCacheReader) Read added in v1.2.0

func (pcr *PageCacheReader) Read(offset int64, size uint32, buf *[]byte, ioPriority int) ([]byte, *readcache.CachePage, error)

func (*PageCacheReader) ReadFixPageSize added in v1.2.0

func (pcr *PageCacheReader) ReadFixPageSize(offset int64, size uint32, buf *[]byte, ioPriority int) ([]byte, *readcache.CachePage, error)

reads the file bytes of the requested pages

func (*PageCacheReader) ReadSinglePage added in v1.2.0

func (pcr *PageCacheReader) ReadSinglePage(cacheKey string, pageOffset int64, pageSize int64, buf *[]byte, ioPriority int) (*readcache.CachePage, []byte, error)

func (*PageCacheReader) ReadVariablePageSize added in v1.2.0

func (pcr *PageCacheReader) ReadVariablePageSize(offset int64, size uint32, buf *[]byte, ioPriority int) ([]byte, *readcache.CachePage, error)

variable-size pages use Get/AddPage, and thus do not reuse buf in dataCacheIns

type ParquetTask added in v1.3.0

type ParquetTask struct {
	scheduler.BaseTask
	// contains filtered or unexported fields
}

func (*ParquetTask) Execute added in v1.3.0

func (t *ParquetTask) Execute()

func (*ParquetTask) GetSeries added in v1.3.0

func (t *ParquetTask) GetSeries(sId uint64) (map[string]string, error)

func (*ParquetTask) GetTagKeys added in v1.4.0

func (t *ParquetTask) GetTagKeys() (map[string]struct{}, error)

func (*ParquetTask) LockFiles added in v1.4.0

func (t *ParquetTask) LockFiles()

func (*ParquetTask) RemoveLog added in v1.4.0

func (t *ParquetTask) RemoveLog()

func (*ParquetTask) Stop added in v1.4.0

func (t *ParquetTask) Stop()

func (*ParquetTask) UnLockFiles added in v1.4.0

func (t *ParquetTask) UnLockFiles()

type PreAggBuilder

type PreAggBuilder interface {
	// contains filtered or unexported methods
}

type PreAggBuilders

type PreAggBuilders struct {
	// contains filtered or unexported fields
}

func (*PreAggBuilders) FloatBuilder added in v1.2.0

func (b *PreAggBuilders) FloatBuilder() *FloatPreAgg

func (*PreAggBuilders) IntegerBuilder added in v1.2.0

func (b *PreAggBuilders) IntegerBuilder() *IntegerPreAgg

func (*PreAggBuilders) Release

func (b *PreAggBuilders) Release()

type QueryfileCache added in v1.1.0

type QueryfileCache struct {
	// contains filtered or unexported fields
}

func GetQueryfileCache added in v1.1.0

func GetQueryfileCache() *QueryfileCache

func NewQueryfileCache added in v1.1.0

func NewQueryfileCache(cap uint32) *QueryfileCache

func (*QueryfileCache) Get added in v1.1.0

func (qfc *QueryfileCache) Get()

func (*QueryfileCache) GetCap added in v1.1.0

func (qfc *QueryfileCache) GetCap() uint32

func (*QueryfileCache) Put added in v1.1.0

func (qfc *QueryfileCache) Put(f TSSPFile)

type ReadContext

type ReadContext struct {
	Ascending bool
	// contains filtered or unexported fields
}

func NewReadContext

func NewReadContext(ascending bool) *ReadContext

func (*ReadContext) GetCoder added in v1.1.0

func (d *ReadContext) GetCoder() *encoding.CoderContext

func (*ReadContext) GetOps

func (d *ReadContext) GetOps() []*comm.CallOption

func (*ReadContext) GetReadBuff added in v1.1.0

func (d *ReadContext) GetReadBuff() []byte

func (*ReadContext) InitPreAggBuilder

func (d *ReadContext) InitPreAggBuilder()

func (*ReadContext) IsAborted added in v1.3.0

func (d *ReadContext) IsAborted() bool

func (*ReadContext) MatchPreAgg

func (d *ReadContext) MatchPreAgg() bool

func (*ReadContext) Release

func (d *ReadContext) Release()

func (*ReadContext) Reset

func (d *ReadContext) Reset()

func (*ReadContext) Set

func (d *ReadContext) Set(ascending bool, tr util.TimeRange, onlyFirstOrLast bool, ops []*comm.CallOption)

func (*ReadContext) SetClosedSignal added in v1.2.0

func (d *ReadContext) SetClosedSignal(s *bool)

func (*ReadContext) SetOps

func (d *ReadContext) SetOps(c []*comm.CallOption)

func (*ReadContext) SetSpan added in v1.1.0

func (d *ReadContext) SetSpan(readSpan, filterSpan *tracing.Span)

func (*ReadContext) SetTr

func (d *ReadContext) SetTr(tr util.TimeRange)

type Segment

type Segment struct {
	// contains filtered or unexported fields
}

Segment offset/size/minT/maxT

type SegmentMeta added in v1.2.0

type SegmentMeta struct {
	// contains filtered or unexported fields
}

func NewSegmentMeta added in v1.2.0

func NewSegmentMeta(id int, c *ChunkMeta) *SegmentMeta

func (*SegmentMeta) GetMaxTime added in v1.2.0

func (s *SegmentMeta) GetMaxTime() int64

func (*SegmentMeta) GetMinTime added in v1.2.0

func (s *SegmentMeta) GetMinTime() int64

type SegmentRange

type SegmentRange [2]int64 // min/max

type SegmentReader added in v1.0.0

type SegmentReader struct {
	// contains filtered or unexported fields
}

func NewSegmentReader added in v1.0.0

func NewSegmentReader(fi *FileIterator) *SegmentReader

func (*SegmentReader) Read added in v1.0.0

func (sr *SegmentReader) Read(seg Segment, ref *record.Field, col *record.ColVal) error

type SegmentSequenceReader added in v1.3.0

type SegmentSequenceReader struct {
	// contains filtered or unexported fields
}

func NewSegmentSequenceReader added in v1.3.0

func NewSegmentSequenceReader(path *sparseindex.OBSFilterPath, taskID int, count uint64, consumeInfo *consume.ConsumeInfo, schema record.Schemas, filterOpt *FilterOptions) (*SegmentSequenceReader, error)

func (*SegmentSequenceReader) Close added in v1.3.0

func (reader *SegmentSequenceReader) Close()

func (*SegmentSequenceReader) ConsumeDateByShard added in v1.3.0

func (reader *SegmentSequenceReader) ConsumeDateByShard() ([]map[string]interface{}, bool, int64, uint64, int64, error)

type SegmentTask added in v1.2.0

type SegmentTask struct {
	// contains filtered or unexported fields
}

type SequenceIterator added in v1.3.0

type SequenceIterator interface {
	SetChunkMetasReader(reader SequenceIteratorChunkMetaReader)
	Release()
	AddFiles(files []TSSPFile)
	Stop()
	Run() error
	Buffer() *pool.Buffer
}

func NewSequenceIterator added in v1.3.0

func NewSequenceIterator(handler SequenceIteratorHandler, logger *Log.Logger) SequenceIterator

type SequenceIteratorChunkMetaReader added in v1.3.0

type SequenceIteratorChunkMetaReader interface {
	ReadChunkMetas(f TSSPFile, idx int) ([]byte, []uint32, error)
}

type SequenceIteratorHandler added in v1.3.0

type SequenceIteratorHandler interface {
	Init(map[string]interface{}) error
	Begin()
	NextFile(TSSPFile)
	NextChunkMeta(cm *ChunkMeta) error
	Limited() bool
	Finish()
}

func NewSeriesKeysIteratorHandler added in v1.4.0

func NewSeriesKeysIteratorHandler(idx IndexMergeSet, condition influxql.Expr, tr *util.TimeRange, limit int) SequenceIteratorHandler

func NewTagValuesIteratorHandler added in v1.3.0

func NewTagValuesIteratorHandler(idx IndexMergeSet, condition influxql.Expr, tr *util.TimeRange, limit int) SequenceIteratorHandler

type Sequencer

type Sequencer struct {
	// contains filtered or unexported fields
}

func NewSequencer

func NewSequencer() *Sequencer

func (*Sequencer) AddRowCounts

func (s *Sequencer) AddRowCounts(mn string, id uint64, rowCounts int64)

func (*Sequencer) BatchUpdateCheckTime

func (s *Sequencer) BatchUpdateCheckTime(p *IdTimePairs, incrRows bool)

func (*Sequencer) DelMmsIdTime added in v1.1.0

func (s *Sequencer) DelMmsIdTime(name string)

func (*Sequencer) Get

func (s *Sequencer) Get(mn string, id uint64) (lastFlushTime, rowCnt int64)

func (*Sequencer) GetMmsIdTime added in v1.3.0

func (s *Sequencer) GetMmsIdTime(name string) *MmsIdTime

func (*Sequencer) IsLoading added in v1.0.0

func (s *Sequencer) IsLoading() bool

func (*Sequencer) ResetMmsIdTime added in v1.1.0

func (s *Sequencer) ResetMmsIdTime()

func (*Sequencer) SeriesTotal added in v1.1.0

func (s *Sequencer) SeriesTotal() uint64

func (*Sequencer) SetStat added in v1.1.0

func (s *Sequencer) SetStat(free, loading bool)

func (*Sequencer) SetToInLoading added in v1.1.0

func (s *Sequencer) SetToInLoading() bool

func (*Sequencer) UnRef added in v1.1.0

func (s *Sequencer) UnRef()

type SeriesCounter added in v1.1.0

type SeriesCounter struct {
	// contains filtered or unexported fields
}

func (*SeriesCounter) DecrN added in v1.1.0

func (sc *SeriesCounter) DecrN(n uint64)

func (*SeriesCounter) Get added in v1.1.0

func (sc *SeriesCounter) Get() uint64

func (*SeriesCounter) Incr added in v1.1.0

func (sc *SeriesCounter) Incr()

func (*SeriesCounter) Reset added in v1.1.0

func (sc *SeriesCounter) Reset()

type SeriesKeys added in v1.4.0

type SeriesKeys struct {
	// contains filtered or unexported fields
}

func (*SeriesKeys) Add added in v1.4.0

func (s *SeriesKeys) Add(key, _ string)

func (*SeriesKeys) Count added in v1.4.0

func (s *SeriesKeys) Count() int

func (*SeriesKeys) ForEach added in v1.4.0

func (s *SeriesKeys) ForEach(process func(key, _ string))

type SeriesKeysIteratorHandler added in v1.4.0

type SeriesKeysIteratorHandler struct {
	// contains filtered or unexported fields
}

func (*SeriesKeysIteratorHandler) Begin added in v1.4.0

func (h *SeriesKeysIteratorHandler) Begin()

func (*SeriesKeysIteratorHandler) Finish added in v1.4.0

func (h *SeriesKeysIteratorHandler) Finish()

func (*SeriesKeysIteratorHandler) Init added in v1.4.0

func (h *SeriesKeysIteratorHandler) Init(param map[string]interface{}) error

func (*SeriesKeysIteratorHandler) Limited added in v1.4.0

func (h *SeriesKeysIteratorHandler) Limited() bool

func (*SeriesKeysIteratorHandler) NextChunkMeta added in v1.4.0

func (h *SeriesKeysIteratorHandler) NextChunkMeta(cm *ChunkMeta) error

func (*SeriesKeysIteratorHandler) NextFile added in v1.4.0

func (h *SeriesKeysIteratorHandler) NextFile(TSSPFile)

type SortKeyIterator added in v1.1.1

type SortKeyIterator struct {
	*FileIterator
	// contains filtered or unexported fields
}

func NewSortKeyIterator added in v1.1.1

func NewSortKeyIterator(fi *FileIterator, sortKeyFields []record.Field, ctx *ReadContext, schema record.Schemas, tcDuration time.Duration, compactWithBlock bool, fileIdx int) (*SortKeyIterator, error)

func (*SortKeyIterator) GetNewRecord added in v1.1.1

func (s *SortKeyIterator) GetNewRecord(tcDuration time.Duration, compactWithBlock bool) error

func (*SortKeyIterator) NextSingleFragment added in v1.1.1

func (s *SortKeyIterator) NextSingleFragment(tbStore *MmsTables, impl *IteratorByRow, pkSchema record.Schemas) (*record.Record, error)

type SortLimitCursor added in v1.3.0

type SortLimitCursor struct {
	// contains filtered or unexported fields
}

func NewSortLimitCursor added in v1.3.0

func NewSortLimitCursor(options hybridqp.Options, schemas record.Schemas, input comm.TimeCutKeyCursor, shardId int64) *SortLimitCursor

func (*SortLimitCursor) Close added in v1.3.0

func (t *SortLimitCursor) Close() error

func (*SortLimitCursor) EndSpan added in v1.3.0

func (t *SortLimitCursor) EndSpan()

func (*SortLimitCursor) GetInput added in v1.3.0

func (t *SortLimitCursor) GetInput() comm.TimeCutKeyCursor

func (*SortLimitCursor) GetSchema added in v1.3.0

func (t *SortLimitCursor) GetSchema() record.Schemas

func (*SortLimitCursor) Name added in v1.3.0

func (t *SortLimitCursor) Name() string

func (*SortLimitCursor) Next added in v1.3.0

func (*SortLimitCursor) NextAggData added in v1.3.0

func (t *SortLimitCursor) NextAggData() (*record.Record, *comm.FileInfo, error)

func (*SortLimitCursor) SetOps added in v1.3.0

func (t *SortLimitCursor) SetOps(ops []*comm.CallOption)

func (*SortLimitCursor) SinkPlan added in v1.3.0

func (t *SortLimitCursor) SinkPlan(plan hybridqp.QueryNode)

func (*SortLimitCursor) StartSpan added in v1.3.0

func (t *SortLimitCursor) StartSpan(span *tracing.Span)

type SortLimitRows added in v1.3.0

type SortLimitRows struct {
	// contains filtered or unexported fields
}

func NewSortLimitRows added in v1.3.0

func NewSortLimitRows(sortIndex []int, schema record.Schemas, shardId int64) *SortLimitRows

func (*SortLimitRows) Len added in v1.3.0

func (rs *SortLimitRows) Len() int

func (*SortLimitRows) Less added in v1.3.0

func (rs *SortLimitRows) Less(i, j int) bool

func (*SortLimitRows) Pop added in v1.3.0

func (rs *SortLimitRows) Pop() interface{}

func (*SortLimitRows) PopToRec added in v1.3.0

func (rs *SortLimitRows) PopToRec() *record.Record

func (*SortLimitRows) Push added in v1.3.0

func (rs *SortLimitRows) Push(x interface{})

func (*SortLimitRows) Swap added in v1.3.0

func (rs *SortLimitRows) Swap(i, j int)

type StreamCompactParquetEvent added in v1.3.0

type StreamCompactParquetEvent struct {
	TSSP2ParquetEvent
}

func (*StreamCompactParquetEvent) Instance added in v1.3.0

func (e *StreamCompactParquetEvent) Instance() Event

func (*StreamCompactParquetEvent) OnWriteChunkMeta added in v1.3.0

func (e *StreamCompactParquetEvent) OnWriteChunkMeta(cm *ChunkMeta)

type StreamIterator

type StreamIterator struct {
	*FileIterator
	// contains filtered or unexported fields
}

func NewStreamStreamIterator

func NewStreamStreamIterator(fi *FileIterator) *StreamIterator

type StreamIterators

type StreamIterators struct {
	TableData

	Conf *Config
	// contains filtered or unexported fields
}

func (*StreamIterators) Close

func (c *StreamIterators) Close()

func (*StreamIterators) FileVersion

func (c *StreamIterators) FileVersion() uint64

func (*StreamIterators) Flush

func (c *StreamIterators) Flush() error

func (*StreamIterators) Init

func (c *StreamIterators) Init(id uint64, chunkDataOffset int64, schema record.Schemas)

func (*StreamIterators) InitEvents added in v1.3.0

func (c *StreamIterators) InitEvents(level uint16) *Events

func (*StreamIterators) Len

func (c *StreamIterators) Len() int

func (*StreamIterators) Less

func (c *StreamIterators) Less(i, j int) bool

func (*StreamIterators) ListenCloseSignal added in v1.2.0

func (c *StreamIterators) ListenCloseSignal(finish chan struct{})

func (*StreamIterators) NewFile

func (c *StreamIterators) NewFile(addFileExt bool) error

func (*StreamIterators) NewTSSPFile

func (c *StreamIterators) NewTSSPFile(tmp bool) (TSSPFile, error)

func (*StreamIterators) Pop

func (c *StreamIterators) Pop() interface{}

func (*StreamIterators) Push

func (c *StreamIterators) Push(v interface{})

func (*StreamIterators) RemoveTmpFiles added in v1.3.0

func (c *StreamIterators) RemoveTmpFiles()

func (*StreamIterators) SetWriter added in v1.2.0

func (c *StreamIterators) SetWriter(w fileops.FileWriter)

func (*StreamIterators) Size

func (c *StreamIterators) Size() int64

func (*StreamIterators) Swap

func (c *StreamIterators) Swap(i, j int)

func (*StreamIterators) SwitchChunkMeta added in v1.2.0

func (c *StreamIterators) SwitchChunkMeta() error

func (*StreamIterators) WithLog

func (c *StreamIterators) WithLog(log *Log.Logger)

func (*StreamIterators) WriteChunkMeta added in v1.2.0

func (c *StreamIterators) WriteChunkMeta(cm *ChunkMeta) (int, error)

type StreamIteratorsPool

type StreamIteratorsPool struct {
	// contains filtered or unexported fields
}

func NewStreamIteratorsPool

func NewStreamIteratorsPool(n int) *StreamIteratorsPool

type StreamWriteFile added in v1.0.0

type StreamWriteFile struct {
	TableData

	Conf *Config
	// contains filtered or unexported fields
}

func NewWriteScanFile added in v1.0.0

func NewWriteScanFile(mst string, m *MmsTables, file TSSPFile, schema record.Schemas) (*StreamWriteFile, error)

func (*StreamWriteFile) AppendColumn added in v1.0.0

func (c *StreamWriteFile) AppendColumn(ref *record.Field) error

func (*StreamWriteFile) ChangeColumn added in v1.0.0

func (c *StreamWriteFile) ChangeColumn(ref record.Field) error

func (*StreamWriteFile) ChangeSid added in v1.0.0

func (c *StreamWriteFile) ChangeSid(sid uint64)

func (*StreamWriteFile) Close added in v1.0.0

func (c *StreamWriteFile) Close(isError bool)

func (*StreamWriteFile) Flush added in v1.0.0

func (c *StreamWriteFile) Flush() error

func (*StreamWriteFile) GetTSSPFile added in v1.0.0

func (c *StreamWriteFile) GetTSSPFile() TSSPFile

func (*StreamWriteFile) Init added in v1.0.0

func (c *StreamWriteFile) Init(id uint64, chunkDataOffset int64, schema record.Schemas)

func (*StreamWriteFile) InitFile added in v1.0.0

func (c *StreamWriteFile) InitFile(seq uint64) error

func (*StreamWriteFile) InitMergedFile added in v1.0.0

func (c *StreamWriteFile) InitMergedFile(f TSSPFile) error

func (*StreamWriteFile) NewFile added in v1.0.0

func (c *StreamWriteFile) NewFile(addFileExt bool) error

func (*StreamWriteFile) NewTSSPFile added in v1.0.0

func (c *StreamWriteFile) NewTSSPFile(tmp bool) (TSSPFile, error)

func (*StreamWriteFile) SetValidate added in v1.1.0

func (c *StreamWriteFile) SetValidate(en bool)

func (*StreamWriteFile) Size added in v1.0.0

func (c *StreamWriteFile) Size() int64

func (*StreamWriteFile) SortColumns added in v1.2.0

func (c *StreamWriteFile) SortColumns()

func (*StreamWriteFile) SwitchChunkMeta added in v1.2.0

func (c *StreamWriteFile) SwitchChunkMeta() error

func (*StreamWriteFile) WriteChunkMeta added in v1.2.0

func (c *StreamWriteFile) WriteChunkMeta(cm *ChunkMeta) (int, error)

func (*StreamWriteFile) WriteCurrentMeta added in v1.0.0

func (c *StreamWriteFile) WriteCurrentMeta() error

func (*StreamWriteFile) WriteData added in v1.0.0

func (c *StreamWriteFile) WriteData(id uint64, ref record.Field, col record.ColVal, timeCol *record.ColVal) error

func (*StreamWriteFile) WriteFile added in v1.0.0

func (c *StreamWriteFile) WriteFile() error

func (*StreamWriteFile) WriteMeta added in v1.0.0

func (c *StreamWriteFile) WriteMeta(cm *ChunkMeta) error

type StringPreAgg

type StringPreAgg struct {
	// contains filtered or unexported fields
}

StringPreAgg: if you change the order of the elements in the structure, remember to modify marshal() and unmarshal() as well.

func NewStringPreAgg

func NewStringPreAgg() *StringPreAgg

type TSSP2ParquetEvent added in v1.3.0

type TSSP2ParquetEvent struct {
	Event
	// contains filtered or unexported fields
}

func (*TSSP2ParquetEvent) Enable added in v1.3.0

func (e *TSSP2ParquetEvent) Enable() bool

func (*TSSP2ParquetEvent) Init added in v1.3.0

func (e *TSSP2ParquetEvent) Init(mst string, level uint16)

func (*TSSP2ParquetEvent) OnFinish added in v1.3.0

func (e *TSSP2ParquetEvent) OnFinish(ctx *EventContext)

func (*TSSP2ParquetEvent) OnInterrupt added in v1.3.0

func (e *TSSP2ParquetEvent) OnInterrupt()

func (*TSSP2ParquetEvent) OnNewFile added in v1.3.0

func (e *TSSP2ParquetEvent) OnNewFile(f TSSPFile)

func (*TSSP2ParquetEvent) OnReplaceFile added in v1.3.0

func (e *TSSP2ParquetEvent) OnReplaceFile(shardDir string, lockFile string) error

type TSSP2ParquetPlan added in v1.3.0

type TSSP2ParquetPlan struct {
	Mst    string
	Schema map[string]uint8
	Files  []string
	// contains filtered or unexported fields
}

func (*TSSP2ParquetPlan) Init added in v1.3.0

func (p *TSSP2ParquetPlan) Init(mst string, level uint16)

type TSSPFile

type TSSPFile interface {
	Path() string
	Name() string
	FileName() TSSPFileName
	LevelAndSequence() (uint16, uint64)
	FileNameMerge() uint16
	FileNameExtend() uint16
	IsOrder() bool
	Ref()
	Unref()
	RefFileReader()
	UnrefFileReader()
	Stop()
	Inuse() bool
	MetaIndexAt(idx int) (*MetaIndex, error)
	MetaIndex(id uint64, tr util.TimeRange) (int, *MetaIndex, error)
	ChunkMeta(id uint64, offset int64, size, itemCount uint32, metaIdx int, ctx *ChunkMetaContext, ioPriority int) (*ChunkMeta, error)
	ReadAt(cm *ChunkMeta, segment int, dst *record.Record, decs *ReadContext, ioPriority int) (*record.Record, error)
	ReadData(offset int64, size uint32, dst *[]byte, ioPriority int) ([]byte, error)
	ReadChunkMetaData(metaIdx int, m *MetaIndex, dst []ChunkMeta, ioPriority int) ([]ChunkMeta, error)

	FileStat() *Trailer
	// FileSize get the size of the disk occupied by file
	FileSize() int64
	// InMemSize get the size of the memory occupied by file
	InMemSize() int64
	Contains(id uint64) (bool, error)
	ContainsByTime(tr util.TimeRange) (bool, error)
	ContainsValue(id uint64, tr util.TimeRange) (bool, error)
	MinMaxTime() (int64, int64, error)

	Open() error
	Close() error
	LoadIntoMemory() error
	LoadComponents() error
	LoadIdTimes(p *IdTimePairs) error
	Rename(newName string) error
	UpdateLevel(level uint16)
	Remove() error
	FreeMemory()
	FreeFileHandle() error
	Version() uint64
	AverageChunkRows() int
	MaxChunkRows() int
	MetaIndexItemNum() int64
	GetFileReaderRef() int64
	RenameOnObs(obsName string, tmp bool, opt *obs.ObsOptions) error

	ChunkMetaCompressMode() uint8
}

func OpenTSSPFile

func OpenTSSPFile(name string, lockPath *string, isOrder bool) (TSSPFile, error)

type TSSPFileAttachedReader added in v1.2.0

type TSSPFileAttachedReader struct {
	// contains filtered or unexported fields
}

func NewTSSPFileAttachedReader added in v1.2.0

func NewTSSPFileAttachedReader(files []TSSPFile, fragRanges []fragment.FragmentRanges, ctx *FileReaderContext, schema hybridqp.Options,
	unnest *influxql.Unnest) (*TSSPFileAttachedReader, error)

func (*TSSPFileAttachedReader) Close added in v1.2.0

func (t *TSSPFileAttachedReader) Close() error

func (*TSSPFileAttachedReader) EndSpan added in v1.2.0

func (t *TSSPFileAttachedReader) EndSpan()

func (*TSSPFileAttachedReader) GetSchema added in v1.2.0

func (t *TSSPFileAttachedReader) GetSchema() record.Schemas

func (*TSSPFileAttachedReader) Name added in v1.2.0

func (t *TSSPFileAttachedReader) Name() string

func (*TSSPFileAttachedReader) Next added in v1.2.0

func (*TSSPFileAttachedReader) NextAggData added in v1.2.0

func (t *TSSPFileAttachedReader) NextAggData() (*record.Record, *comm.FileInfo, error)

func (*TSSPFileAttachedReader) ResetBy added in v1.2.0

func (t *TSSPFileAttachedReader) ResetBy(files []TSSPFile, fragRanges []fragment.FragmentRanges) error

func (*TSSPFileAttachedReader) SetOps added in v1.2.0

func (t *TSSPFileAttachedReader) SetOps(ops []*comm.CallOption)

func (*TSSPFileAttachedReader) SinkPlan added in v1.2.0

func (t *TSSPFileAttachedReader) SinkPlan(plan hybridqp.QueryNode)

func (*TSSPFileAttachedReader) StartSpan added in v1.2.0

func (t *TSSPFileAttachedReader) StartSpan(span *tracing.Span)

type TSSPFileDetachedReader added in v1.2.0

type TSSPFileDetachedReader struct {
	// contains filtered or unexported fields
}

func NewTSSPFileDetachedReader added in v1.2.0

func NewTSSPFileDetachedReader(metaIndex []*MetaIndex, blocks [][]int, ctx *FileReaderContext, path *sparseindex.OBSFilterPath, unnest *influxql.Unnest,
	isSort bool, options hybridqp.Options) (*TSSPFileDetachedReader, error)

func (*TSSPFileDetachedReader) Close added in v1.2.0

func (t *TSSPFileDetachedReader) Close() error

func (*TSSPFileDetachedReader) EndSpan added in v1.2.0

func (t *TSSPFileDetachedReader) EndSpan()

func (*TSSPFileDetachedReader) GetSchema added in v1.2.0

func (t *TSSPFileDetachedReader) GetSchema() record.Schemas

func (*TSSPFileDetachedReader) Name added in v1.2.0

func (t *TSSPFileDetachedReader) Name() string

func (*TSSPFileDetachedReader) Next added in v1.2.0

func (*TSSPFileDetachedReader) NextAggData added in v1.2.0

func (t *TSSPFileDetachedReader) NextAggData() (*record.Record, *comm.FileInfo, error)

func (*TSSPFileDetachedReader) ResetBy added in v1.2.0

func (t *TSSPFileDetachedReader) ResetBy(metaIndex []*MetaIndex, blocks [][]int, ctx *FileReaderContext)

func (*TSSPFileDetachedReader) SetOps added in v1.2.0

func (t *TSSPFileDetachedReader) SetOps(ops []*comm.CallOption)

func (*TSSPFileDetachedReader) SinkPlan added in v1.2.0

func (t *TSSPFileDetachedReader) SinkPlan(plan hybridqp.QueryNode)

func (*TSSPFileDetachedReader) StartSpan added in v1.2.0

func (t *TSSPFileDetachedReader) StartSpan(span *tracing.Span)

func (*TSSPFileDetachedReader) UpdateTime added in v1.3.0

func (t *TSSPFileDetachedReader) UpdateTime(time int64)

type TSSPFileName

type TSSPFileName struct {
	// contains filtered or unexported fields
}

func NewTSSPFileName

func NewTSSPFileName(seq uint64, level, merge, extent uint16, order bool, lockPath *string) TSSPFileName

func (*TSSPFileName) Equal

func (n *TSSPFileName) Equal(other *TSSPFileName) bool

func (*TSSPFileName) ParseFileName

func (n *TSSPFileName) ParseFileName(name string) error

func (*TSSPFileName) Path

func (n *TSSPFileName) Path(dir string, tmp bool) string

func (*TSSPFileName) SetExtend

func (n *TSSPFileName) SetExtend(extend uint16)

func (*TSSPFileName) SetLevel

func (n *TSSPFileName) SetLevel(l uint16)

func (*TSSPFileName) SetMerge

func (n *TSSPFileName) SetMerge(merge uint16)

func (*TSSPFileName) SetOrder

func (n *TSSPFileName) SetOrder(v bool)

func (*TSSPFileName) SetSeq

func (n *TSSPFileName) SetSeq(seq uint64)

func (*TSSPFileName) String

func (n *TSSPFileName) String() string

func (*TSSPFileName) TmpPath

func (n *TSSPFileName) TmpPath(dir string) string

type TSSPFiles

type TSSPFiles struct {
	// contains filtered or unexported fields
}

func NewTSSPFiles

func NewTSSPFiles() *TSSPFiles

func (*TSSPFiles) Append

func (f *TSSPFiles) Append(file ...TSSPFile)

func (*TSSPFiles) AppendReloadFiles added in v1.4.0

func (f *TSSPFiles) AppendReloadFiles(file ...TSSPInfo)

func (*TSSPFiles) Files

func (f *TSSPFiles) Files() []TSSPFile

func (*TSSPFiles) Len

func (f *TSSPFiles) Len() int

func (*TSSPFiles) Less

func (f *TSSPFiles) Less(i, j int) bool

func (*TSSPFiles) Lock added in v1.4.0

func (f *TSSPFiles) Lock()

func (*TSSPFiles) MaxMerged added in v1.3.0

func (f *TSSPFiles) MaxMerged() uint16

func (*TSSPFiles) MergedLevelCount added in v1.4.0

func (f *TSSPFiles) MergedLevelCount(level uint16) int

func (*TSSPFiles) RLock added in v1.2.0

func (f *TSSPFiles) RLock()

func (*TSSPFiles) RUnlock added in v1.2.0

func (f *TSSPFiles) RUnlock()

func (*TSSPFiles) StopFiles

func (f *TSSPFiles) StopFiles()

func (*TSSPFiles) Swap

func (f *TSSPFiles) Swap(i, j int)

func (*TSSPFiles) Unlock added in v1.4.0

func (f *TSSPFiles) Unlock()

type TSSPInfo added in v1.4.0

type TSSPInfo interface {
	Order() bool
	FilePath() string
	FileName() *TSSPFileName
	LevelAndSequence() (uint16, uint64)
	FileNameExtend() uint16
}

type TableData

type TableData struct {
	// contains filtered or unexported fields
}

type TableReaders

type TableReaders []TSSPFile

func (TableReaders) Len

func (tables TableReaders) Len() int

func (TableReaders) Less

func (tables TableReaders) Less(i, j int) bool

func (TableReaders) Swap

func (tables TableReaders) Swap(i, j int)

type TableStat

type TableStat struct {
	ExtraData
	// contains filtered or unexported fields
}

type TableStoreGC

type TableStoreGC struct {
	// contains filtered or unexported fields
}

func (*TableStoreGC) Add

func (sgc *TableStoreGC) Add(files ...TSSPFile)

func (*TableStoreGC) GC

func (sgc *TableStoreGC) GC()

type TablesGC

type TablesGC interface {
	Add(files ...TSSPFile)
	GC()
}

func NewTableStoreGC

func NewTableStoreGC() TablesGC

type TablesStore

type TablesStore interface {
	SetOpId(shardId uint64, opId uint64)
	Open() (int64, error)
	Close() error
	AddTable(ms *MsBuilder, isOrder bool, tmp bool)
	AddTSSPFiles(name string, isOrder bool, f ...TSSPFile)
	AddBothTSSPFiles(flushed *bool, name string, orderFiles []TSSPFile, unorderFiles []TSSPFile)
	AddPKFile(name, file string, rec *record.Record, mark fragment.IndexFragment, tcLocation int8)
	GetPKFile(mstName string, file string) (pkInfo *colstore.PKInfo, ok bool)
	FreeAllMemReader()
	ReplaceFiles(name string, oldFiles, newFiles []TSSPFile, isOrder bool) error
	GetBothFilesRef(measurement string, hasTimeFilter bool, tr util.TimeRange, flushed *bool) ([]TSSPFile, []TSSPFile, bool)
	ReplaceDownSampleFiles(mstNames []string, originFiles [][]TSSPFile, newFiles [][]TSSPFile, isOrder bool, callBack func()) error
	NextSequence() uint64
	Sequencer() *Sequencer
	GetTSSPFiles(mm string, isOrder bool) (*TSSPFiles, bool)
	GetCSFiles(mm string) (*TSSPFiles, bool)
	CopyCSFiles(name string) []TSSPFile
	Tier() uint64
	SetTier(tier uint64)
	File(name string, namePath string, isOrder bool) TSSPFile
	CompactDone(seq []string)
	CompactionEnable()
	CompactionDisable()
	MergeEnable()
	MergeDisable()
	CompactionEnabled() bool
	MergeEnabled() bool
	IsOutOfOrderFilesExist() bool
	MergeOutOfOrder(shId uint64, full bool, force bool) error
	LevelCompact(level uint16, shid uint64) error
	FullCompact(shid uint64) error
	SetAddFunc(addFunc func(int64))
	LoadSequencer()
	GetRowCountsBySid(measurement string, sid uint64) (int64, error)
	AddRowCountsBySid(measurement string, sid uint64, rowCounts int64)
	GetOutOfOrderFileNum() int
	GetTableFileNum(string, bool) int
	GetMstFileStat() *stats.FileStat
	DropMeasurement(ctx context.Context, name string) error
	GetFileSeq() uint64
	DisableCompAndMerge()
	EnableCompAndMerge()
	FreeSequencer() bool
	SetImmTableType(engineType config.EngineType)
	SetMstInfo(name string, mstInfo *meta.MeasurementInfo)
	SetAccumulateMetaIndex(name string, aMetaIndex *AccumulateMetaIndex)
	GetMstInfo(name string) (*meta.MeasurementInfo, bool)
	SeriesTotal() uint64
	SetLockPath(lock *string)
	FullyCompacted() bool
	SetObsOption(option *obs.ObsOptions)
	GetObsOption() *obs.ObsOptions
	GetShardID() uint64
	SetIndexMergeSet(idx IndexMergeSet)
	GetAllMstList() []string
}

type TagSets added in v1.3.0

type TagSets struct {
	// contains filtered or unexported fields
}

func (*TagSets) Add added in v1.3.0

func (s *TagSets) Add(key, val string)

func (*TagSets) Count added in v1.4.0

func (s *TagSets) Count() int

func (*TagSets) ForEach added in v1.3.0

func (s *TagSets) ForEach(process func(tagKey, tagValue string))

type TagValuesIteratorHandler added in v1.3.0

type TagValuesIteratorHandler struct {
	// contains filtered or unexported fields
}

func (*TagValuesIteratorHandler) Begin added in v1.3.0

func (h *TagValuesIteratorHandler) Begin()

func (*TagValuesIteratorHandler) Finish added in v1.3.0

func (h *TagValuesIteratorHandler) Finish()

func (*TagValuesIteratorHandler) Init added in v1.3.0

func (h *TagValuesIteratorHandler) Init(param map[string]interface{}) error

func (*TagValuesIteratorHandler) Limited added in v1.3.0

func (h *TagValuesIteratorHandler) Limited() bool

func (*TagValuesIteratorHandler) NextChunkMeta added in v1.3.0

func (h *TagValuesIteratorHandler) NextChunkMeta(cm *ChunkMeta) error

func (*TagValuesIteratorHandler) NextFile added in v1.3.0

func (h *TagValuesIteratorHandler) NextFile(TSSPFile)

type TimePreAgg

type TimePreAgg struct {
	// contains filtered or unexported fields
}

func NewTimePreAgg

func NewTimePreAgg() *TimePreAgg

type Trailer

type Trailer struct {
	TableStat
	// contains filtered or unexported fields
}

func (*Trailer) ContainsId

func (t *Trailer) ContainsId(id uint64) bool

func (*Trailer) ContainsTime

func (t *Trailer) ContainsTime(tm util.TimeRange) bool

func (*Trailer) DataSize added in v1.2.0

func (t *Trailer) DataSize() int64

func (*Trailer) IndexSize added in v1.2.0

func (t *Trailer) IndexSize() int64

func (*Trailer) Marshal added in v1.4.0

func (t *Trailer) Marshal(dst []byte) []byte

func (*Trailer) MetaIndexItemNum added in v1.0.0

func (t *Trailer) MetaIndexItemNum() int64

func (*Trailer) MetaIndexSize added in v1.2.0

func (t *Trailer) MetaIndexSize() int64

func (*Trailer) SetChunkMetaCompressFlag added in v1.2.0

func (t *Trailer) SetChunkMetaCompressFlag()

func (*Trailer) Unmarshal added in v1.4.0

func (t *Trailer) Unmarshal(src []byte) ([]byte, error)

type TsChunkDataImp added in v1.2.0

type TsChunkDataImp struct {
}

func (*TsChunkDataImp) EncodeChunk added in v1.2.0

func (t *TsChunkDataImp) EncodeChunk(b *ChunkDataBuilder, id uint64, offset int64, rec *record.Record, dst []byte, timeSorted bool) ([]byte, error)

func (*TsChunkDataImp) EncodeChunkForCompaction added in v1.2.0

func (t *TsChunkDataImp) EncodeChunkForCompaction(b *ChunkDataBuilder, offset int64, rec *record.Record, dst []byte, accumulateRowsIndex []int, timeSorted bool) ([]byte, error)

func (*TsChunkDataImp) SetAccumulateRowsIndex added in v1.2.0

func (t *TsChunkDataImp) SetAccumulateRowsIndex(accumulateRowsIndex []int)

func (*TsChunkDataImp) SetDetachedInfo added in v1.2.0

func (t *TsChunkDataImp) SetDetachedInfo(writeDetached bool)

type UnnestOperator added in v1.3.0

type UnnestOperator interface {
	Compute(rec *record.Record)
}

func GetUnnestFuncOperator added in v1.3.0

func GetUnnestFuncOperator(unnest *influxql.Unnest, schema record.Schemas) (UnnestOperator, error)

func NewMatchAllOperator added in v1.3.0

func NewMatchAllOperator(unnest *influxql.Unnest, schemas record.Schemas) UnnestOperator

type UnorderedColumn added in v1.3.0

type UnorderedColumn struct {
	// contains filtered or unexported fields
}

func (*UnorderedColumn) Init added in v1.3.0

func (c *UnorderedColumn) Init(meta *ColumnMeta, idx int)

type UnorderedColumnReader added in v1.0.0

type UnorderedColumnReader struct {
	// contains filtered or unexported fields
}

func (*UnorderedColumnReader) ChangeColumn added in v1.3.0

func (r *UnorderedColumnReader) ChangeColumn(sid uint64, ref *record.Field)

func (*UnorderedColumnReader) ChangeSeries added in v1.3.0

func (r *UnorderedColumnReader) ChangeSeries(sid uint64) error

func (*UnorderedColumnReader) Close added in v1.3.0

func (r *UnorderedColumnReader) Close()

func (*UnorderedColumnReader) HasColumn added in v1.3.0

func (r *UnorderedColumnReader) HasColumn() bool

func (*UnorderedColumnReader) MatchSeries added in v1.3.0

func (r *UnorderedColumnReader) MatchSeries(sid uint64) bool

func (*UnorderedColumnReader) Read added in v1.0.0

func (r *UnorderedColumnReader) Read(sid uint64, maxTime int64) (*record.ColVal, []int64, error)

func (*UnorderedColumnReader) ReadSchemas added in v1.3.0

func (r *UnorderedColumnReader) ReadSchemas(sid uint64, maxTime int64, dst map[string]record.Field)

func (*UnorderedColumnReader) ReadTime added in v1.3.0

func (r *UnorderedColumnReader) ReadTime(sid uint64, maxTime int64) []int64

type UnorderedColumns added in v1.3.0

type UnorderedColumns struct {
	// contains filtered or unexported fields
}

func NewUnorderedColumns added in v1.3.0

func NewUnorderedColumns() *UnorderedColumns

func (*UnorderedColumns) ChangeColumn added in v1.3.0

func (c *UnorderedColumns) ChangeColumn(name string) *UnorderedColumn

func (*UnorderedColumns) GetLineOffset added in v1.3.0

func (c *UnorderedColumns) GetLineOffset(name string) int

func (*UnorderedColumns) GetSegOffset added in v1.3.0

func (c *UnorderedColumns) GetSegOffset(name string) int

func (*UnorderedColumns) IncrLineOffset added in v1.3.0

func (c *UnorderedColumns) IncrLineOffset(name string, n int)

func (*UnorderedColumns) IncrSegOffset added in v1.3.0

func (c *UnorderedColumns) IncrSegOffset(name string, n int)

func (*UnorderedColumns) Init added in v1.3.0

func (c *UnorderedColumns) Init(cm *ChunkMeta)

func (*UnorderedColumns) ReadCompleted added in v1.3.0

func (c *UnorderedColumns) ReadCompleted() bool

func (*UnorderedColumns) SetRemainLine added in v1.3.0

func (c *UnorderedColumns) SetRemainLine(n int)

func (*UnorderedColumns) TimeMeta added in v1.3.0

func (c *UnorderedColumns) TimeMeta() *ColumnMeta

func (*UnorderedColumns) Walk added in v1.3.0

func (c *UnorderedColumns) Walk(callback func(meta *ColumnMeta))

type UnorderedReader added in v1.0.0

type UnorderedReader struct {
	// contains filtered or unexported fields
}

func NewUnorderedReader added in v1.0.0

func NewUnorderedReader(log *logger.Logger) *UnorderedReader

func (*UnorderedReader) AddFiles added in v1.0.0

func (r *UnorderedReader) AddFiles(files []TSSPFile)

func (*UnorderedReader) AllocNilCol added in v1.1.0

func (r *UnorderedReader) AllocNilCol(size int, ref *record.Field) *record.ColVal

func (*UnorderedReader) ChangeColumn added in v1.3.0

func (r *UnorderedReader) ChangeColumn(sid uint64, ref *record.Field)

func (*UnorderedReader) ChangeSeries added in v1.3.0

func (r *UnorderedReader) ChangeSeries(sid uint64) error

func (*UnorderedReader) Close added in v1.0.0

func (r *UnorderedReader) Close()

func (*UnorderedReader) CloseFile added in v1.3.0

func (r *UnorderedReader) CloseFile()

func (*UnorderedReader) HasSeries added in v1.1.0

func (r *UnorderedReader) HasSeries(sid uint64) bool

func (*UnorderedReader) InitTimes added in v1.0.1

func (r *UnorderedReader) InitTimes(sid uint64, maxTime int64)

InitTimes initializes the time column of unordered data.

func (*UnorderedReader) Read added in v1.0.0

func (r *UnorderedReader) Read(sid uint64, maxTime int64) (*record.ColVal, []int64, error)

Read reads data based on the series ID, column, and time range.

func (*UnorderedReader) ReadAllTimes added in v1.0.1

func (r *UnorderedReader) ReadAllTimes() []int64

func (*UnorderedReader) ReadRemain added in v1.0.0

func (r *UnorderedReader) ReadRemain(sid uint64, cb remainCallback) error

func (*UnorderedReader) ReadSeriesSchemas added in v1.0.0

func (r *UnorderedReader) ReadSeriesSchemas(sid uint64, maxTime int64) record.Schemas

func (*UnorderedReader) ReadTimes added in v1.0.0

func (r *UnorderedReader) ReadTimes(maxTime int64) []int64

type UnorderedReaderContext added in v1.1.0

type UnorderedReaderContext struct {
	// contains filtered or unexported fields
}

Directories

Path Synopsis

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL