Documentation
Overview
Copyright 2023 Huawei Cloud Computing Technologies Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
Copyright 2024 Huawei Cloud Computing Technologies Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
Index
- Constants
- Variables
- func AddTSSP2ParquetProcess(files ...string)
- func AggregateData(newRec, baseRec *record.Record, ops []*comm.CallOption) bool
- func CanEncodeOneRowMode(col *record.ColVal) bool
- func CleanTempFile(f fileops.File)
- func CompactRecovery(path string, group *CompactGroup)
- func CompareT[T int | int64 | float64 | string](s1, s2 T, isAscending bool) (bool, bool)
- func CreateTSSPFileReader(size int64, fd fileops.File, trailer *Trailer, tb *TableData, ver uint64, ...) (*tsspFileReader, error)
- func DecodeAggTimes(buf []byte) ([]byte, int64, int64, error)
- func DecodeColumnHeader(col *record.ColVal, data []byte, colType uint8) ([]byte, []byte, error)
- func DecodeColumnOfOneValue(data []byte, col *record.ColVal, typ uint8)
- func DelTSSP2ParquetProcess(files ...string)
- func EncodeColumnHeader(col *record.ColVal, dst []byte, typ uint8) []byte
- func FileOperation(f TSSPFile, op func())
- func FilesMergedTire(files []TSSPFile) uint64
- func FillNilCol(col *record.ColVal, size int, ref *record.Field)
- func FilterByField(rec *record.Record, filterRec *record.Record, filterOption *BaseFilterOptions, ...) *record.Record
- func FilterByFieldFuncs(rec, filterRec *record.Record, filterOption *BaseFilterOptions, ...) *record.Record
- func FilterByOpts(rec *record.Record, opt *FilterOptions) *record.Record
- func FilterByTime(rec *record.Record, tr util.TimeRange) *record.Record
- func FilterByTimeDescend(rec *record.Record, tr util.TimeRange) *record.Record
- func FlushRemoteEnabled(tier uint64) bool
- func GenFixRowsPerSegment(data *record.Record, rowNumPerSegment int) []int
- func GenLogFileName(logSeq *uint64) string
- func GenParquetLogName() string
- func GenRecByReserveIds(rec, filterRec *record.Record, rowNumber []int, redIdxMap map[int]struct{}) *record.Record
- func GetBloomFilterBuf() *bloomFilter
- func GetChunkMetaCompressMode() uint8
- func GetCursorsBy(path *sparseindex.OBSFilterPath, tr util.TimeRange, isAscending bool) (int, uint64, error)
- func GetDetachedFlushEnabled() bool
- func GetDir(engineType config.EngineType, path string) string
- func GetMaxRowsPerSegment4TsStore() int
- func GetMergeFlag4TsStore() int32
- func GetMetaIndexChunkCount(obsOptions *obs.ObsOptions, dataPath string) (int64, error)
- func GetMetaIndexOffsetAndLengthByChunkId(chunkId int64) (offset, length int64)
- func GetPKItems(path string, obsOpts *obs.ObsOptions, miChunkIds []int64) (*colstore.DetachedPKMetaInfo, []*colstore.DetachedPKInfo, error)
- func GetPKMetaOffsetLengthByChunkId(pkMetaInfo *colstore.DetachedPKMetaInfo, chunkId int) (offset, length int64)
- func GetSortKeyColVal(fi *FileIterator, sortKey []record.Field, ctx *ReadContext, ...) ([]*record.ColVal, []record.SortItem, error)
- func GetTmpFileSuffix() string
- func InParquetProcess(files ...string) bool
- func Init()
- func InitDecFunctions()
- func InitQueryFileCache(cap uint32, enable bool)
- func InitWriterPool(size int)
- func IsChunkMetaCompressSelf() bool
- func IsFlushToFinalFile(totalSegmentCnt, flushToFinalFileLimit uint64) bool
- func IsInterfaceNil(value interface{}) bool
- func IsTempleFile(name string) bool
- func LeftBound(nums []uint32, target uint32, left int) int
- func MarshalChunkMeta(ctx *ChunkMetaCodecCtx, cm *ChunkMeta, dst []byte) ([]byte, error)
- func MarshalColumnMeta(ctx *ChunkMetaCodecCtx, col *ColumnMeta, dst []byte) []byte
- func MarshalTimeRange(ctx *ChunkMetaCodecCtx, sr []SegmentRange, dst []byte) []byte
- func MergeRecovery(path string, name string, ctx *MergeContext)
- func MergeTimes(a []int64, b []int64, dst []int64) []int64
- func NewCsImmTableImpl() *csImmTableImpl
- func NewFileLoadContext() *fileLoadContext
- func NewLastMergeTime() *lastMergeTime
- func NewMergePerformer(ur *UnorderedReader, stat *statistics.MergeStatItem) *mergePerformer
- func NewObsWriter(path, fileName string, obsOpts *obs.ObsOptions) (*obsWriter, error)
- func NewObsWriterByFd(fd fileops.File, obsOpts *obs.ObsOptions) (*obsWriter, error)
- func NewTSSPFileReader(name string, lockPath *string) (*tsspFileReader, error)
- func NewTsImmTable() *tsImmTableImpl
- func NonStreamingCompaction(fi FilesInfo) bool
- func PreAggOnlyOneRow(buf []byte) bool
- func ProcParquetLog(logDir string, lockPath *string, ctx *EventContext) error
- func PutBloomFilterBuf(key *bloomFilter)
- func PutChunkMeta(filePath string, chunkMeta *ChunkMeta)
- func PutDetachedSegmentTask(queryID string, meta IndexFrags)
- func PutIDTimePairs(pair *IdTimePairs)
- func ReadPKDataAll(path string, opts *obs.ObsOptions, offset, length []int64, ...) ([]*colstore.DetachedPKData, error)
- func ReadPKMetaAll(path string, opts *obs.ObsOptions, offset, length []int64) ([]*colstore.DetachedPKMeta, error)
- func ReadPKMetaInfoAll(path string, opts *obs.ObsOptions) (*colstore.DetachedPKMetaInfo, error)
- func ReadReliabilityLog(file string, dst interface{}) error
- func RefFilesReader(files ...TSSPFile)
- func RegistryDDLRespData(ddl hybridqp.DDLType, f GenDDLRespDataFunc)
- func RegistryDDLSequenceHandler(ddl hybridqp.DDLType, f GenSequenceHandlerFunc)
- func ReleaseColumnBuilder(b PreAggBuilder)
- func ReloadSpecifiedFiles(m *MmsTables, mst string, tsspFiles *TSSPFiles)
- func RemoveTsspSuffix(dataPath string) string
- func RenameIndexFiles(fname string, indexList []string) error
- func RenameTmpFiles(newFiles []TSSPFile) error
- func RenameTmpFilesWithPKIndex(newFiles []TSSPFile, ir *influxql.IndexRelation) error
- func RenameTmpFullTextIdxFile(msb *MsBuilder) error
- func ResetAggregateData(newRec *record.Record, ops []*comm.CallOption)
- func ResetQueryFileCache()
- func SaveReliabilityLog(data interface{}, dir string, lockFile string, nameGenerator func() string) (string, error)
- func SearchChunkMetaBlock(data []byte, itemCount uint32, sid uint64) []byte
- func SetChunkMetaCompressMode(mode int)
- func SetCompactLimit(bytesPerSec int64, burstLimit int64)
- func SetCompactionEnabled(compactionEnabled bool)
- func SetDetachedFlushEnabled(detachFlushEnabled bool)
- func SetFragmentsNumPerFlush(fragmentsNumPerFlush int)
- func SetIndexCompressMode(mode int)
- func SetMaxCompactor(n int)
- func SetMaxFullCompactor(n int)
- func SetMaxRowsPerSegment4TsStore(maxRowsPerSegmentLimit int)
- func SetMaxSegmentLimit4TsStore(limit int)
- func SetMergeFlag4TsStore(v int32)
- func SetSnapshotLimit(bytesPerSec int64, burstLimit int64)
- func SetSnapshotTblNum(snapshotTblNum int)
- func SnapshotLimit() bool
- func SumFilesSize(files []TSSPFile) int64
- func TimeSorted(sortKeys []string) bool
- func UnmarshalChunkMeta(ctx *ChunkMetaCodecCtx, cm *ChunkMeta, buf []byte) ([]byte, error)
- func UnmarshalChunkMetaAdaptive(ctx *ChunkMetaCodecCtx, cm *ChunkMeta, columns []string, buf []byte) ([]byte, error)
- func UnmarshalChunkMetaBaseAttr(ctx *ChunkMetaCodecCtx, cm *ChunkMeta, buf []byte) ([]byte, error)
- func UnmarshalChunkMetaWithColumns(ctx *ChunkMetaCodecCtx, cm *ChunkMeta, columns []string, buf []byte) ([]byte, error)
- func UnmarshalColumnMeta(ctx *ChunkMetaCodecCtx, segCount int, col *ColumnMeta, buf []byte) ([]byte, error)
- func UnmarshalColumnMetaWithoutName(ctx *ChunkMetaCodecCtx, segCount int, col *ColumnMeta, buf []byte) ([]byte, error)
- func UnmarshalColumnName(ctx *ChunkMetaCodecCtx, buf []byte) ([]byte, string)
- func UnmarshalTimeRange(ctx *ChunkMetaCodecCtx, cm *ChunkMeta, buf []byte) ([]byte, bool)
- func UnrefFiles(files ...TSSPFile)
- func UnrefFilesReader(files ...TSSPFile)
- func UpdateChunkMetaFunc(_, _ cache.Entry) bool
- func UpdateDetachedMetaDataCache(old, new cache.Entry) bool
- func UseIndexCompressWriter() bool
- func WriteIntoFile(msb *MsBuilder, tmp bool, withPKIndex bool, ir *influxql.IndexRelation) error
- type AccumulateMetaIndex
- type BaseFilterOptions
- type BloomFilterIterator
- type BooleanPreAgg
- type BufferReader
- type CSParquetManager
- type CSParquetPlan
- type CSParquetTask
- type ChunkDataBuilder
- type ChunkIterator
- type ChunkIterators
- func (c *ChunkIterators) Close()
- func (c *ChunkIterators) Len() int
- func (c *ChunkIterators) Less(i, j int) bool
- func (c *ChunkIterators) Next() (uint64, *record.Record, error)
- func (c *ChunkIterators) Pop() interface{}
- func (c *ChunkIterators) Push(v interface{})
- func (c *ChunkIterators) Swap(i, j int)
- func (c *ChunkIterators) WithLog(log *Log.Logger)
- type ChunkMeta
- func (m *ChunkMeta) AllocColMeta(ref *record.Field) *ColumnMeta
- func (m *ChunkMeta) Clone() *ChunkMeta
- func (m *ChunkMeta) DelEmptyColMeta()
- func (m *ChunkMeta) GetColMeta() []ColumnMeta
- func (m *ChunkMeta) GetSid() (sid uint64)
- func (m *ChunkMeta) GetTimeRangeBy(index int) SegmentRange
- func (m *ChunkMeta) Len() int
- func (m *ChunkMeta) Less(i, j int) bool
- func (m *ChunkMeta) MinMaxTime() (min int64, max int64)
- func (m *ChunkMeta) Rows(ab PreAggBuilder) int
- func (m *ChunkMeta) SegmentCount() int
- func (m *ChunkMeta) Size() int
- func (m *ChunkMeta) Swap(i, j int)
- func (m *ChunkMeta) TimeMeta() *ColumnMeta
- func (m *ChunkMeta) UnmarshalWithColumns(src []byte, columns []string) ([]byte, error)
- func (m *ChunkMeta) Validation()
- type ChunkMetaCodecCtx
- func (ctx *ChunkMetaCodecCtx) GetHeader() *ChunkMetaHeader
- func (ctx *ChunkMetaCodecCtx) GetIndex(val string) uint64
- func (ctx *ChunkMetaCodecCtx) GetValue(idx int) string
- func (ctx *ChunkMetaCodecCtx) MemSize() int
- func (ctx *ChunkMetaCodecCtx) Release()
- func (ctx *ChunkMetaCodecCtx) SetTrailer(trailer *Trailer)
- type ChunkMetaContext
- type ChunkMetaEntry
- type ChunkMetaHeader
- func (h *ChunkMetaHeader) AppendValue(val string)
- func (h *ChunkMetaHeader) CopyTo(dst *ChunkMetaHeader)
- func (h *ChunkMetaHeader) GetValue(idx int) string
- func (h *ChunkMetaHeader) Len() int
- func (h *ChunkMetaHeader) Marshal(dst []byte) []byte
- func (h *ChunkMetaHeader) Reset()
- func (h *ChunkMetaHeader) Unmarshal(buf []byte)
- type ColAux
- type ColumnBuilder
- func (b *ColumnBuilder) BuildPreAgg()
- func (b *ColumnBuilder) EncodeColumn(ref record.Field, col *record.ColVal, timeCols []record.ColVal, ...) ([]byte, error)
- func (b *ColumnBuilder) EncodeColumnBySize(ref record.Field, col *record.ColVal, timeCols []record.ColVal, ...) ([]byte, error)
- func (b *ColumnBuilder) SetEncodeMode(detached bool)
- type ColumnIterator
- func (itr *ColumnIterator) Close()
- func (itr *ColumnIterator) Error() error
- func (itr *ColumnIterator) IncrChunkUsed()
- func (itr *ColumnIterator) IterCurrentChunk(p ColumnIteratorPerformer) error
- func (itr *ColumnIterator) NextChunkMeta() bool
- func (itr *ColumnIterator) NextColumn(colIdx int) (*record.Field, bool)
- func (itr *ColumnIterator) PutCol(col *record.ColVal)
- func (itr *ColumnIterator) Run(p ColumnIteratorPerformer) error
- type ColumnIteratorPerformer
- type ColumnMeta
- func (m *ColumnMeta) Clone() ColumnMeta
- func (m *ColumnMeta) Equal(name string, ty int) bool
- func (m *ColumnMeta) GetPreAgg() []byte
- func (m *ColumnMeta) GetSegment(i int) (int64, uint32)
- func (m *ColumnMeta) IsTime() bool
- func (m *ColumnMeta) Name() string
- func (m *ColumnMeta) RowCount(ref *record.Field, decs *ReadContext) (int64, error)
- func (m *ColumnMeta) Type() uint8
- type ColumnReader
- type CompactGroup
- type CompactGroupBuilder
- type CompactTask
- type CompactedFileInfo
- type Config
- func (c *Config) GetCompactionEnabled() bool
- func (c *Config) GetMaxRowsPerSegment() int
- func (c *Config) GetMaxSegmentLimit() int
- func (c *Config) SetExpectedSegmentSize(n uint32)
- func (c *Config) SetFilesLimit(n int64)
- func (c *Config) SetMaxRowsPerSegment(maxRowsPerSegmentLimit int)
- func (c *Config) SetMaxSegmentLimit(n int)
- type CsChunkDataImp
- func (c *CsChunkDataImp) EncodeChunk(b *ChunkDataBuilder, id uint64, offset int64, rec *record.Record, dst []byte, ...) ([]byte, error)
- func (c *CsChunkDataImp) EncodeChunkForCompaction(b *ChunkDataBuilder, offset int64, rec *record.Record, dst []byte, ...) ([]byte, error)
- func (c *CsChunkDataImp) SetAccumulateRowsIndex(accumulateRowsIndex []int)
- func (c *CsChunkDataImp) SetDetachedInfo(writeDetached bool)
- type DDLBasePlan
- type DDLRespData
- type DetachedChunkMetaReader
- type DetachedMetaDataReader
- type DetachedMetaIndexReader
- type DetachedPKDataReader
- type DetachedPKMetaInfoReader
- type DetachedPKMetaReader
- type DetachedSegmentEntry
- func (e *DetachedSegmentEntry) GetKey() string
- func (e *DetachedSegmentEntry) GetTime() time.Time
- func (e *DetachedSegmentEntry) GetValue() interface{}
- func (e *DetachedSegmentEntry) SetTime(time time.Time)
- func (e *DetachedSegmentEntry) SetValue(value interface{})
- func (e *DetachedSegmentEntry) Size() int64
- type EncodeChunkData
- type EncodeColumnMode
- type EngineShard
- type Event
- type EventBus
- type EventContext
- type EventType
- type Events
- func (es *Events) Finish(success bool, ctx *EventContext)
- func (es *Events) Instance() *Events
- func (es *Events) Register(e Event)
- func (es *Events) TriggerNewFile(f TSSPFile)
- func (es *Events) TriggerReplaceFile(shardDir, lock string) error
- func (es *Events) TriggerWriteChunkMeta(cm *ChunkMeta)
- func (es *Events) TriggerWriteRecord(rec *record.Record)
- type ExtraData
- func (e *ExtraData) CopyTo(dst *ExtraData)
- func (e *ExtraData) EnableTimeStore()
- func (e *ExtraData) MarshalExtraData(dst []byte) []byte
- func (e *ExtraData) Reset()
- func (e *ExtraData) SetChunkMetaCompressFlag(v uint8)
- func (e *ExtraData) SetChunkMetaHeader(header *ChunkMetaHeader)
- func (e *ExtraData) UnmarshalExtraData(src []byte) ([]byte, error)
- type FileInfoExtend
- type FileIterator
- type FileIterators
- type FileReader
- type FileReaderContext
- type FileSwapper
- type FilesInfo
- type FilterOptions
- type FirstLastReader
- func (r *FirstLastReader) Init(cm *ChunkMeta, cr ColumnReader, ref *record.Field, dst *record.Record, ...) *FirstLastReader
- func (r *FirstLastReader) Read(ctx *ReadContext, copied bool, ioPriority int) error
- func (r *FirstLastReader) ReadMaxFromPreAgg(colMeta *ColumnMeta) (interface{}, int64, bool)
- func (r *FirstLastReader) ReadMinFromPreAgg(colMeta *ColumnMeta) (interface{}, int64, bool)
- func (r *FirstLastReader) Release()
- type FloatPreAgg
- type FragmentIterator
- type FragmentIterators
- func (f *FragmentIterators) Close()
- func (f *FragmentIterators) CompareWithBreakPoint(curPos, breakPos int) bool
- func (f *FragmentIterators) IsEmpty() bool
- func (f *FragmentIterators) Len() int
- func (f *FragmentIterators) Less(i, j int) bool
- func (f *FragmentIterators) Pop() interface{}
- func (f *FragmentIterators) Push(v interface{})
- func (f *FragmentIterators) Swap(i, j int)
- func (f *FragmentIterators) WithLog(log *Log.Logger)
- type FragmentIteratorsPool
- type GenDDLRespDataFunc
- type GenSequenceHandlerFunc
- type HotFileManager
- func (m *HotFileManager) Add(f TSSPFile)
- func (m *HotFileManager) AddAll(files []TSSPFile)
- func (m *HotFileManager) AllocLoadMemory(size int64) bool
- func (m *HotFileManager) BackgroundFree()
- func (m *HotFileManager) Free()
- func (m *HotFileManager) InHotDuration(f TSSPFile) bool
- func (m *HotFileManager) IncrMemorySize(size int64)
- func (m *HotFileManager) Run()
- func (m *HotFileManager) SetMaxMemorySize(size int64)
- func (m *HotFileManager) Stop()
- type HotFileReader
- type HotFileWriter
- func (w *HotFileWriter) AppendChunkMetaToData() error
- func (w *HotFileWriter) BuildHotFileReader(r fileops.BasicFileReader) *HotFileReader
- func (w *HotFileWriter) MemSize() int
- func (w *HotFileWriter) Release()
- func (w *HotFileWriter) WriteChunkMeta(b []byte) (int, error)
- func (w *HotFileWriter) WriteData(b []byte) (int, error)
- type IdTimePairs
- func (p *IdTimePairs) Add(id uint64, tm int64)
- func (p *IdTimePairs) AddRowCounts(rowCounts int64)
- func (p *IdTimePairs) Len() int
- func (p *IdTimePairs) Marshal(encTimes bool, dst []byte, ctx *encoding.CoderContext) []byte
- func (p *IdTimePairs) Reset(name string)
- func (p *IdTimePairs) UnmarshalBlocks(decTimes bool, src []byte, startIdx int, decoder *encoding.CoderContext) (int, int, int, error)
- func (p *IdTimePairs) UnmarshalHeader(src []byte) (uint32, error)
- type ImmTable
- type IndexCompressWriter
- func (w *IndexCompressWriter) BlockSize() int
- func (w *IndexCompressWriter) Close() error
- func (w *IndexCompressWriter) CopyTo(to io.Writer) (int, error)
- func (w *IndexCompressWriter) GetWriter() *bufio.Writer
- func (w *IndexCompressWriter) Init(name string, lock *string, cacheMeta bool, limitCompact bool)
- func (w *IndexCompressWriter) MetaDataBlocks(dst [][]byte) [][]byte
- func (w *IndexCompressWriter) Size() int
- func (w *IndexCompressWriter) SwitchMetaBuffer() (int, error)
- func (w *IndexCompressWriter) Write(p []byte) (int, error)
- type IndexFrags
- type IndexMergeSet
- type IndexWriter
- type IntegerPreAgg
- type IteratorByBlock
- type IteratorByRow
- type Location
- func (l *Location) AscendingDone()
- func (l *Location) Contains(sid uint64, tr util.TimeRange, ctx *ChunkMetaContext) (bool, error)
- func (l *Location) DescendingDone()
- func (l *Location) GetChunkMeta() *ChunkMeta
- func (l *Location) ReadData(filterOpts *FilterOptions, dst *record.Record, filterDst *record.Record) (*record.Record, error)
- func (l *Location) ResetMeta()
- func (l *Location) SetChunkMeta(chunkMeta *ChunkMeta)
- func (d *Location) SetClosedSignal(s *bool)
- func (l *Location) SetFragmentRanges(frs []*fragment.FragmentRange)
- type LocationCursor
- func (l *LocationCursor) AddFilterRecPool(pool *record.CircularRecordPool)
- func (l *LocationCursor) AddLocation(loc *Location)
- func (l *LocationCursor) Close()
- func (l *LocationCursor) FragmentCount() int
- func (l *LocationCursor) Len() int
- func (l *LocationCursor) Less(i, j int) bool
- func (l *LocationCursor) ReadData(filterOpts *FilterOptions, dst *record.Record, ...) (*record.Record, error)
- func (l *LocationCursor) ReadMeta(filterOpts *FilterOptions, dst *record.Record, ...) (*record.Record, error)
- func (l *LocationCursor) ReadOutOfOrderMeta(filterOpts *FilterOptions, dst *record.Record) (*record.Record, error)
- func (l *LocationCursor) Reset()
- func (l *LocationCursor) Reverse()
- func (l *LocationCursor) RowCount() int
- func (l *LocationCursor) Swap(i, j int)
- type MatchAllOperator
- type MeasurementInProcess
- type MergeColPool
- type MergeContext
- func (ctx *MergeContext) AddUnordered(f TSSPFile)
- func (ctx *MergeContext) Limited() bool
- func (ctx *MergeContext) MergeSelf() bool
- func (ctx *MergeContext) MergeSelfFast() bool
- func (ctx *MergeContext) Release()
- func (ctx *MergeContext) Sort()
- func (ctx *MergeContext) ToLevel() uint16
- func (ctx *MergeContext) UnorderedLen() int
- func (ctx *MergeContext) UpdateLevel(l uint16)
- type MergePerformers
- func (c *MergePerformers) Close()
- func (c *MergePerformers) Closed() bool
- func (c *MergePerformers) Len() int
- func (c *MergePerformers) Less(i, j int) bool
- func (c *MergePerformers) Next() error
- func (c *MergePerformers) Pop() interface{}
- func (c *MergePerformers) Push(v interface{})
- func (c *MergePerformers) Release()
- func (c *MergePerformers) Swap(i, j int)
- type MergeSelf
- type MergeSelfParquetEvent
- type MetaControl
- type MetaData
- type MetaDataInfo
- type MetaDatas
- type MetaIndex
- type MetaQueue
- type MetaStack
- type MmsIdTime
- type MmsReaders
- type MmsTables
- func (m *MmsTables) AddBothTSSPFiles(flushed *bool, name string, orderFiles []TSSPFile, unorderFiles []TSSPFile)
- func (m *MmsTables) AddPKFile(mstName string, file string, rec *record.Record, mark fragment.IndexFragment, ...)
- func (m *MmsTables) AddRowCountsBySid(measurement string, sid uint64, rowCounts int64)
- func (m *MmsTables) AddTSSPFiles(name string, isOrder bool, files ...TSSPFile)
- func (m *MmsTables) AddTable(mb *MsBuilder, isOrder bool, tmp bool)
- func (m *MmsTables) Close() error
- func (m *MmsTables) CompactDone(files []string)
- func (m *MmsTables) CompactionDisable()
- func (m *MmsTables) CompactionEnable()
- func (m *MmsTables) CompactionEnabled() bool
- func (m *MmsTables) CopyCSFiles(name string) []TSSPFile
- func (m *MmsTables) DisableCompAndMerge()
- func (m *MmsTables) DropMeasurement(_ context.Context, name string) error
- func (m *MmsTables) EnableCompAndMerge()
- func (m *MmsTables) File(mstName string, fileName string, isOrder bool) TSSPFile
- func (m *MmsTables) FreeAllMemReader()
- func (m *MmsTables) FreeSequencer() bool
- func (m *MmsTables) FullCompact(shid uint64) error
- func (m *MmsTables) FullyCompacted() bool
- func (m *MmsTables) GetAllMstList() []string
- func (m *MmsTables) GetBothFilesRef(measurement string, hasTimeFilter bool, tr util.TimeRange, flushed *bool) ([]TSSPFile, []TSSPFile, bool)
- func (m *MmsTables) GetCSFiles(name string) (files *TSSPFiles, ok bool)
- func (m *MmsTables) GetFileSeq() uint64
- func (m *MmsTables) GetMstFileStat() *statistics.FileStat
- func (m *MmsTables) GetMstInfo(name string) (*meta.MeasurementInfo, bool)
- func (m *MmsTables) GetObsOption() *obs.ObsOptions
- func (m *MmsTables) GetOutOfOrderFileNum() int
- func (m *MmsTables) GetPKFile(mstName string, file string) (pkInfo *colstore.PKInfo, ok bool)
- func (m *MmsTables) GetRowCountsBySid(measurement string, sid uint64) (int64, error)
- func (m *MmsTables) GetShardID() uint64
- func (m *MmsTables) GetTSSPFiles(name string, isOrder bool) (files *TSSPFiles, ok bool)
- func (m *MmsTables) GetTableFileNum(name string, order bool) int
- func (m *MmsTables) IsOutOfOrderFilesExist() bool
- func (m *MmsTables) LevelCompact(level uint16, shid uint64) error
- func (m *MmsTables) Listen(signal chan struct{}, onClose func())
- func (m *MmsTables) LoadSequencer()
- func (m *MmsTables) MergeDisable()
- func (m *MmsTables) MergeEnable()
- func (m *MmsTables) MergeEnabled() bool
- func (m *MmsTables) MergeOutOfOrder(shId uint64, full bool, force bool) error
- func (m *MmsTables) NewChunkIterators(group FilesInfo) *ChunkIterators
- func (m *MmsTables) NewStreamIterators(group FilesInfo) *StreamIterators
- func (m *MmsTables) NewStreamWriteFile(mst string) *StreamWriteFile
- func (m *MmsTables) NextSequence() uint64
- func (m *MmsTables) Open() (int64, error)
- func (m *MmsTables) ReloadSequencer(seq *Sequencer, async bool)
- func (m *MmsTables) RenameFileToLevel(plan *CompactGroup) error
- func (m *MmsTables) ReplaceDownSampleFiles(mstNames []string, originFiles [][]TSSPFile, newFiles [][]TSSPFile, ...) (err error)
- func (m *MmsTables) ReplaceFiles(name string, oldFiles, newFiles []TSSPFile, isOrder bool) (err error)
- func (m *MmsTables) ReplacePKFile(mstName string, file string, rec *record.Record, mark fragment.IndexFragment, ...) error
- func (m *MmsTables) Sequencer() *Sequencer
- func (m *MmsTables) SeriesTotal() uint64
- func (m *MmsTables) SetAccumulateMetaIndex(name string, aMetaIndex *AccumulateMetaIndex)
- func (m *MmsTables) SetAddFunc(addFunc func(int64))
- func (m *MmsTables) SetImmTableType(engineType config.EngineType)
- func (m *MmsTables) SetIndexMergeSet(idx IndexMergeSet)
- func (m *MmsTables) SetLockPath(lock *string)
- func (m *MmsTables) SetMstInfo(name string, mstInfo *meta.MeasurementInfo)
- func (m *MmsTables) SetObsOption(option *obs.ObsOptions)
- func (m *MmsTables) SetOpId(shardId uint64, opId uint64)
- func (m *MmsTables) SetTier(tier uint64)
- func (m *MmsTables) Tier() uint64
- func (m *MmsTables) Wait()
- type MsBuilder
- func (b *MsBuilder) BloomFilterNeedDetached(filterDetachedWriteTimes int) bool
- func (b *MsBuilder) CloseIndexWriters() error
- func (b *MsBuilder) FileVersion() uint64
- func (b *MsBuilder) Flush() error
- func (b *MsBuilder) GetChunkBuilder() *ChunkDataBuilder
- func (b *MsBuilder) GetFullTextIdx() bool
- func (b *MsBuilder) GetIndexBuilder() *index.IndexWriterBuilder
- func (b *MsBuilder) GetLocalBfCount() int64
- func (b *MsBuilder) GetPKInfoNum() int
- func (b *MsBuilder) GetPKMark(i int) fragment.IndexFragment
- func (b *MsBuilder) GetPKRecord(i int) *record.Record
- func (b *MsBuilder) MaxRowsPerSegment() int
- func (b *MsBuilder) Name() string
- func (b *MsBuilder) NewIndexWriterBuilder(schema record.Schemas, indexRelation influxql.IndexRelation)
- func (b *MsBuilder) NewPKIndexWriter()
- func (b *MsBuilder) NewTSSPFile(tmp bool) (TSSPFile, error)
- func (b *MsBuilder) Reset()
- func (b *MsBuilder) SetEncodeChunkDataImp(engineType config.EngineType)
- func (b *MsBuilder) SetFullTextIdx(fullTextIdx bool)
- func (b *MsBuilder) SetLocalBfCount(count int64)
- func (b *MsBuilder) SetTCLocation(tcLocation int8)
- func (b *MsBuilder) SetTimeSorted(timeSorted bool)
- func (b *MsBuilder) SetToHot()
- func (b *MsBuilder) Size() int64
- func (b *MsBuilder) StoreTimes()
- func (b *MsBuilder) SwitchChunkMeta() error
- func (b *MsBuilder) WithLog(log *logger.Logger)
- func (b *MsBuilder) WriteChunkMeta(cm *ChunkMeta) (int, error)
- func (b *MsBuilder) WriteData(id uint64, data *record.Record) error
- func (b *MsBuilder) WriteDetached(id uint64, data *record.Record, pkSchema record.Schemas, firstFlush bool, ...) error
- func (b *MsBuilder) WriteDetachedIndex(writeRec *record.Record, rowsPerSegment []int) error
- func (b *MsBuilder) WriteDetachedMetaAndIndex(writeRec *record.Record, pkSchema record.Schemas, firstFlush bool, ...) error
- func (b *MsBuilder) WriteRecord(id uint64, data *record.Record, ...) (*MsBuilder, error)
- func (b *MsBuilder) WriteRecordByCol(id uint64, data *record.Record, schema record.Schemas, ...) (*MsBuilder, error)
- type PageCacheReader
- func (pcr *PageCacheReader) GetCachePageIdsAndOffsets(start int64, size uint32) ([]int64, []int64, error)
- func (pcr *PageCacheReader) GetMaxPageIdAndOffset() (int64, int64)
- func (pcr *PageCacheReader) Init()
- func (pcr *PageCacheReader) Read(offset int64, size uint32, buf *[]byte, ioPriority int) ([]byte, *readcache.CachePage, error)
- func (pcr *PageCacheReader) ReadFixPageSize(offset int64, size uint32, buf *[]byte, ioPriority int) ([]byte, *readcache.CachePage, error)
- func (pcr *PageCacheReader) ReadSinglePage(cacheKey string, pageOffset int64, pageSize int64, buf *[]byte, ioPriority int) (*readcache.CachePage, []byte, error)
- func (pcr *PageCacheReader) ReadVariablePageSize(offset int64, size uint32, buf *[]byte, ioPriority int) ([]byte, *readcache.CachePage, error)
- type ParquetTask
- type PreAggBuilder
- type PreAggBuilders
- type QueryfileCache
- type ReadContext
- func (d *ReadContext) GetCoder() *encoding.CoderContext
- func (d *ReadContext) GetOps() []*comm.CallOption
- func (d *ReadContext) GetReadBuff() []byte
- func (d *ReadContext) InitPreAggBuilder()
- func (d *ReadContext) IsAborted() bool
- func (d *ReadContext) MatchPreAgg() bool
- func (d *ReadContext) Release()
- func (d *ReadContext) Reset()
- func (d *ReadContext) Set(ascending bool, tr util.TimeRange, onlyFirstOrLast bool, ...)
- func (d *ReadContext) SetClosedSignal(s *bool)
- func (d *ReadContext) SetOps(c []*comm.CallOption)
- func (d *ReadContext) SetSpan(readSpan, filterSpan *tracing.Span)
- func (d *ReadContext) SetTr(tr util.TimeRange)
- type Segment
- type SegmentMeta
- type SegmentRange
- type SegmentReader
- type SegmentSequenceReader
- type SegmentTask
- type SequenceIterator
- type SequenceIteratorChunkMetaReader
- type SequenceIteratorHandler
- type Sequencer
- func (s *Sequencer) AddRowCounts(mn string, id uint64, rowCounts int64)
- func (s *Sequencer) BatchUpdateCheckTime(p *IdTimePairs, incrRows bool)
- func (s *Sequencer) DelMmsIdTime(name string)
- func (s *Sequencer) Get(mn string, id uint64) (lastFlushTime, rowCnt int64)
- func (s *Sequencer) GetMmsIdTime(name string) *MmsIdTime
- func (s *Sequencer) IsLoading() bool
- func (s *Sequencer) ResetMmsIdTime()
- func (s *Sequencer) SeriesTotal() uint64
- func (s *Sequencer) SetStat(free, loading bool)
- func (s *Sequencer) SetToInLoading() bool
- func (s *Sequencer) UnRef()
- type SeriesCounter
- type SeriesKeys
- type SeriesKeysIteratorHandler
- func (h *SeriesKeysIteratorHandler) Begin()
- func (h *SeriesKeysIteratorHandler) Finish()
- func (h *SeriesKeysIteratorHandler) Init(param map[string]interface{}) error
- func (h *SeriesKeysIteratorHandler) Limited() bool
- func (h *SeriesKeysIteratorHandler) NextChunkMeta(cm *ChunkMeta) error
- func (h *SeriesKeysIteratorHandler) NextFile(TSSPFile)
- type SortKeyIterator
- type SortLimitCursor
- func (t *SortLimitCursor) Close() error
- func (t *SortLimitCursor) EndSpan()
- func (t *SortLimitCursor) GetInput() comm.TimeCutKeyCursor
- func (t *SortLimitCursor) GetSchema() record.Schemas
- func (t *SortLimitCursor) Name() string
- func (t *SortLimitCursor) Next() (*record.Record, comm.SeriesInfoIntf, error)
- func (t *SortLimitCursor) NextAggData() (*record.Record, *comm.FileInfo, error)
- func (t *SortLimitCursor) SetOps(ops []*comm.CallOption)
- func (t *SortLimitCursor) SinkPlan(plan hybridqp.QueryNode)
- func (t *SortLimitCursor) StartSpan(span *tracing.Span)
- type SortLimitRows
- type StreamCompactParquetEvent
- type StreamIterator
- type StreamIterators
- func (c *StreamIterators) Close()
- func (c *StreamIterators) FileVersion() uint64
- func (c *StreamIterators) Flush() error
- func (c *StreamIterators) Init(id uint64, chunkDataOffset int64, schema record.Schemas)
- func (c *StreamIterators) InitEvents(level uint16) *Events
- func (c *StreamIterators) Len() int
- func (c *StreamIterators) Less(i, j int) bool
- func (c *StreamIterators) ListenCloseSignal(finish chan struct{})
- func (c *StreamIterators) NewFile(addFileExt bool) error
- func (c *StreamIterators) NewTSSPFile(tmp bool) (TSSPFile, error)
- func (c *StreamIterators) Pop() interface{}
- func (c *StreamIterators) Push(v interface{})
- func (c *StreamIterators) RemoveTmpFiles()
- func (c *StreamIterators) SetWriter(w fileops.FileWriter)
- func (c *StreamIterators) Size() int64
- func (c *StreamIterators) Swap(i, j int)
- func (c *StreamIterators) SwitchChunkMeta() error
- func (c *StreamIterators) WithLog(log *Log.Logger)
- func (c *StreamIterators) WriteChunkMeta(cm *ChunkMeta) (int, error)
- type StreamIteratorsPool
- type StreamWriteFile
- func (c *StreamWriteFile) AppendColumn(ref *record.Field) error
- func (c *StreamWriteFile) ChangeColumn(ref record.Field) error
- func (c *StreamWriteFile) ChangeSid(sid uint64)
- func (c *StreamWriteFile) Close(isError bool)
- func (c *StreamWriteFile) Flush() error
- func (c *StreamWriteFile) GetTSSPFile() TSSPFile
- func (c *StreamWriteFile) Init(id uint64, chunkDataOffset int64, schema record.Schemas)
- func (c *StreamWriteFile) InitFile(seq uint64) error
- func (c *StreamWriteFile) InitMergedFile(f TSSPFile) error
- func (c *StreamWriteFile) NewFile(addFileExt bool) error
- func (c *StreamWriteFile) NewTSSPFile(tmp bool) (TSSPFile, error)
- func (c *StreamWriteFile) SetValidate(en bool)
- func (c *StreamWriteFile) Size() int64
- func (c *StreamWriteFile) SortColumns()
- func (c *StreamWriteFile) SwitchChunkMeta() error
- func (c *StreamWriteFile) WriteChunkMeta(cm *ChunkMeta) (int, error)
- func (c *StreamWriteFile) WriteCurrentMeta() error
- func (c *StreamWriteFile) WriteData(id uint64, ref record.Field, col record.ColVal, timeCol *record.ColVal) error
- func (c *StreamWriteFile) WriteFile() error
- func (c *StreamWriteFile) WriteMeta(cm *ChunkMeta) error
- type StringPreAgg
- type TSSP2ParquetEvent
- func (e *TSSP2ParquetEvent) Enable() bool
- func (e *TSSP2ParquetEvent) Init(mst string, level uint16)
- func (e *TSSP2ParquetEvent) OnFinish(ctx *EventContext)
- func (e *TSSP2ParquetEvent) OnInterrupt()
- func (e *TSSP2ParquetEvent) OnNewFile(f TSSPFile)
- func (e *TSSP2ParquetEvent) OnReplaceFile(shardDir string, lockFile string) error
- type TSSP2ParquetPlan
- type TSSPFile
- type TSSPFileAttachedReader
- func (t *TSSPFileAttachedReader) Close() error
- func (t *TSSPFileAttachedReader) EndSpan()
- func (t *TSSPFileAttachedReader) GetSchema() record.Schemas
- func (t *TSSPFileAttachedReader) Name() string
- func (t *TSSPFileAttachedReader) Next() (*record.Record, comm.SeriesInfoIntf, error)
- func (t *TSSPFileAttachedReader) NextAggData() (*record.Record, *comm.FileInfo, error)
- func (t *TSSPFileAttachedReader) ResetBy(files []TSSPFile, fragRanges []fragment.FragmentRanges) error
- func (t *TSSPFileAttachedReader) SetOps(ops []*comm.CallOption)
- func (t *TSSPFileAttachedReader) SinkPlan(plan hybridqp.QueryNode)
- func (t *TSSPFileAttachedReader) StartSpan(span *tracing.Span)
- type TSSPFileDetachedReader
- func (t *TSSPFileDetachedReader) Close() error
- func (t *TSSPFileDetachedReader) EndSpan()
- func (t *TSSPFileDetachedReader) GetSchema() record.Schemas
- func (t *TSSPFileDetachedReader) Name() string
- func (t *TSSPFileDetachedReader) Next() (*record.Record, comm.SeriesInfoIntf, error)
- func (t *TSSPFileDetachedReader) NextAggData() (*record.Record, *comm.FileInfo, error)
- func (t *TSSPFileDetachedReader) ResetBy(metaIndex []*MetaIndex, blocks [][]int, ctx *FileReaderContext)
- func (t *TSSPFileDetachedReader) SetOps(ops []*comm.CallOption)
- func (t *TSSPFileDetachedReader) SinkPlan(plan hybridqp.QueryNode)
- func (t *TSSPFileDetachedReader) StartSpan(span *tracing.Span)
- func (t *TSSPFileDetachedReader) UpdateTime(time int64)
- type TSSPFileName
- func (n *TSSPFileName) Equal(other *TSSPFileName) bool
- func (n *TSSPFileName) ParseFileName(name string) error
- func (n *TSSPFileName) Path(dir string, tmp bool) string
- func (n *TSSPFileName) SetExtend(extend uint16)
- func (n *TSSPFileName) SetLevel(l uint16)
- func (n *TSSPFileName) SetMerge(merge uint16)
- func (n *TSSPFileName) SetOrder(v bool)
- func (n *TSSPFileName) SetSeq(seq uint64)
- func (n *TSSPFileName) String() string
- func (n *TSSPFileName) TmpPath(dir string) string
- type TSSPFiles
- func (f *TSSPFiles) Append(file ...TSSPFile)
- func (f *TSSPFiles) AppendReloadFiles(file ...TSSPInfo)
- func (f *TSSPFiles) Files() []TSSPFile
- func (f *TSSPFiles) Len() int
- func (f *TSSPFiles) Less(i, j int) bool
- func (f *TSSPFiles) Lock()
- func (f *TSSPFiles) MaxMerged() uint16
- func (f *TSSPFiles) MergedLevelCount(level uint16) int
- func (f *TSSPFiles) RLock()
- func (f *TSSPFiles) RUnlock()
- func (f *TSSPFiles) StopFiles()
- func (f *TSSPFiles) Swap(i, j int)
- func (f *TSSPFiles) Unlock()
- type TSSPInfo
- type TableData
- type TableReaders
- type TableStat
- type TableStoreGC
- type TablesGC
- type TablesStore
- type TagSets
- type TagValuesIteratorHandler
- func (h *TagValuesIteratorHandler) Begin()
- func (h *TagValuesIteratorHandler) Finish()
- func (h *TagValuesIteratorHandler) Init(param map[string]interface{}) error
- func (h *TagValuesIteratorHandler) Limited() bool
- func (h *TagValuesIteratorHandler) NextChunkMeta(cm *ChunkMeta) error
- func (h *TagValuesIteratorHandler) NextFile(TSSPFile)
- type TimePreAgg
- type Trailer
- func (t *Trailer) ContainsId(id uint64) bool
- func (t *Trailer) ContainsTime(tm util.TimeRange) bool
- func (t *Trailer) DataSize() int64
- func (t *Trailer) IndexSize() int64
- func (t *Trailer) Marshal(dst []byte) []byte
- func (t *Trailer) MetaIndexItemNum() int64
- func (t *Trailer) MetaIndexSize() int64
- func (t *Trailer) SetChunkMetaCompressFlag()
- func (t *Trailer) Unmarshal(src []byte) ([]byte, error)
- type TsChunkDataImp
- func (t *TsChunkDataImp) EncodeChunk(b *ChunkDataBuilder, id uint64, offset int64, rec *record.Record, dst []byte, ...) ([]byte, error)
- func (t *TsChunkDataImp) EncodeChunkForCompaction(b *ChunkDataBuilder, offset int64, rec *record.Record, dst []byte, ...) ([]byte, error)
- func (t *TsChunkDataImp) SetAccumulateRowsIndex(accumulateRowsIndex []int)
- func (t *TsChunkDataImp) SetDetachedInfo(writeDetached bool)
- type UnnestOperator
- type UnorderedColumn
- type UnorderedColumnReader
- func (r *UnorderedColumnReader) ChangeColumn(sid uint64, ref *record.Field)
- func (r *UnorderedColumnReader) ChangeSeries(sid uint64) error
- func (r *UnorderedColumnReader) Close()
- func (r *UnorderedColumnReader) HasColumn() bool
- func (r *UnorderedColumnReader) MatchSeries(sid uint64) bool
- func (r *UnorderedColumnReader) Read(sid uint64, maxTime int64) (*record.ColVal, []int64, error)
- func (r *UnorderedColumnReader) ReadSchemas(sid uint64, maxTime int64, dst map[string]record.Field)
- func (r *UnorderedColumnReader) ReadTime(sid uint64, maxTime int64) []int64
- type UnorderedColumns
- func (c *UnorderedColumns) ChangeColumn(name string) *UnorderedColumn
- func (c *UnorderedColumns) GetLineOffset(name string) int
- func (c *UnorderedColumns) GetSegOffset(name string) int
- func (c *UnorderedColumns) IncrLineOffset(name string, n int)
- func (c *UnorderedColumns) IncrSegOffset(name string, n int)
- func (c *UnorderedColumns) Init(cm *ChunkMeta)
- func (c *UnorderedColumns) ReadCompleted() bool
- func (c *UnorderedColumns) SetRemainLine(n int)
- func (c *UnorderedColumns) TimeMeta() *ColumnMeta
- func (c *UnorderedColumns) Walk(callback func(meta *ColumnMeta))
- type UnorderedReader
- func (r *UnorderedReader) AddFiles(files []TSSPFile)
- func (r *UnorderedReader) AllocNilCol(size int, ref *record.Field) *record.ColVal
- func (r *UnorderedReader) ChangeColumn(sid uint64, ref *record.Field)
- func (r *UnorderedReader) ChangeSeries(sid uint64) error
- func (r *UnorderedReader) Close()
- func (r *UnorderedReader) CloseFile()
- func (r *UnorderedReader) HasSeries(sid uint64) bool
- func (r *UnorderedReader) InitTimes(sid uint64, maxTime int64)
- func (r *UnorderedReader) Read(sid uint64, maxTime int64) (*record.ColVal, []int64, error)
- func (r *UnorderedReader) ReadAllTimes() []int64
- func (r *UnorderedReader) ReadRemain(sid uint64, cb remainCallback) error
- func (r *UnorderedReader) ReadSeriesSchemas(sid uint64, maxTime int64) record.Schemas
- func (r *UnorderedReader) ReadTimes(maxTime int64) []int64
- type UnorderedReaderContext
Constants
const (
    ChunkMetaSize int64 = 128 * 1024 * 1024
    ChunkMetaTTL        = 60 * time.Minute
)
const (
    QueryMetaCacheTTL            = 10 * time.Minute
    QueryMetaDataCacheSize int64 = 50 * 1024 * 1024 * int64(MetaIndexLen+128*int(unsafe.Sizeof(fragment.FragmentRange{})))
)
const (
    MetaIndexLimitNum         = 16
    MetaIndexHeaderSize int64 = 16
    MetaIndexItemSize         = int64(util.Int64SizeBytes*3 + util.Uint64SizeBytes + util.Uint32SizeBytes)
)
const (
    PKMetaLimitNum   = 16
    PKMetaPrefixSize = util.Uint64SizeBytes*2 + util.Uint32SizeBytes*2
)
const (
    PKMetaInfoLength int64 = 12
    PkMetaHeaderSize       = int64(util.Uint32SizeBytes * 2)
)
const (
    PRELOAD = iota
    LOAD
)
const (
    SwapperCompressNone   = 0
    SwapperCompressSnappy = 1
    SwapperCompressZSTD   = 2
)
const (
    META_DATA_N_BYTES  int32 = 8 + 8 + 8 + 8 + 8 + 4 + 4
    META_STORE_N_BYTES int32 = META_DATA_N_BYTES + 4
    META_DATA_SIZE     int32 = META_STORE_N_BYTES + 4
)
const (
    MinMaxTimeLen        = int(unsafe.Sizeof(SegmentRange{}))
    MetaIndexLen         = int(unsafe.Sizeof(MetaIndex{}))
    DetachedMetaIndexLen = int(unsafe.Sizeof(MetaIndex{}) - 4) // the count field is not used
)
const (
    InitParamKeyDst         string = "dst"
    InitParamKeyKeys        string = "keys"
    InitParamKeyMeasurement string = "measurement"
)
const (
    BLOOMFILTER_SIZE         = 8
    SERIESKEY_STATISTIC_SIZE = 24
    COMPRESSION_RATIO        = 2
)
const (
    ChunkMetaReadNum     = 16
    BatchReaderRecordNum = 8

    ReaderContentNumSpan  = "reader_content_num_span"
    ReaderContentSizeSpan = "reader_content_size_span"
    ReaderContentDuration = "reader_content_duration"
    ReaderFilterDuration  = "reader_filter_duration"
)
const (
    ChunkMetaCompressNone   = 0
    ChunkMetaCompressSnappy = 1
    ChunkMetaCompressLZ4    = 2
    ChunkMetaCompressSelf   = 3
    ChunkMetaCompressEnd    = 4
)
const (
    DownSampleLogDir   = "downsample_log"
    ShardMoveLogDir    = "shard_move_log"
    TsspDirName        = "tssp"
    ColumnStoreDirName = obs.ColumnStoreDirName
    CountBinFile       = "count.txt"
    CapacityBinFile    = "capacity.txt"
)
const (
    DataFile        = "segment.bin"
    ChunkMetaFile   = "segment.meta"
    MetaIndexFile   = "segment.idx"
    PrimaryKeyFile  = "primary.idx"
    PrimaryMetaFile = "primary.meta"
)
const (
CompactLevels = 7
)
const (
DefaultLevelMergeFileNum = 4
)
const (
FD_OUTSIDE uint32 = 0x00001
)
const MetaIndexConsumeNum = 16
const (
MetaIndexSegmentNum = 16
)
const PKDataLimitNum = 16
const (
SortLimitCursorDuration = "sort_limit_cursor_duration"
)
Variables
var (
    ErrCompStopped       = errors.New("compact stopped")
    ErrDownSampleStopped = errors.New("downSample stopped")
    ErrDroppingMst       = errors.New("measurement is dropped")
    ErrParquetStopped    = errors.New("parquet task stopped")

    LevelCompactRule = []uint16{0, 1, 0, 2, 0, 3, 0, 1, 2, 3, 0, 4, 0, 5, 0, 1, 2, 6}
    // The column store currently performs only level 0 and level 1 compaction, but the full functionality is available.
    LevelCompactRuleForCs = []uint16{0, 1, 0, 1, 0, 1}
    LeveLMinGroupFiles    = [CompactLevels]int{8, 4, 4, 4, 4, 4, 2}

    EnableMergeOutOfOrder = true
)
var (
    SegmentLen       = (Segment{}).bytes()
    ColumnMetaLenMin = (ColumnMeta{}).bytes(1)
    ChunkMetaMinLen  = (&ChunkMeta{}).minBytes()
)
var (
CLog = Log.NewLogger(errno.ModuleCompact)
)
var ChunkMetaCache = cache.NewCache(ChunkMetaSize, ChunkMetaTTL)
var DDLRespDataFactory = make(map[hybridqp.DDLType]GenDDLRespDataFunc)
DDLRespDataFactory is a factory for creating DDLRespData.
var DDLSequenceHandlerFactory = make(map[hybridqp.DDLType]GenSequenceHandlerFunc)
DDLSequenceHandlerFactory is a factory for creating sequence handlers.
var DetachedMetaDataCache = cache.NewCache(QueryMetaDataCacheSize, QueryMetaCacheTTL)
var ErrDirtyLog = errors.New("incomplete log file")
var (
LevelMergeFileNum = []int{8, 8}
)
Functions
func AddTSSP2ParquetProcess (added in v1.4.0)
func AddTSSP2ParquetProcess(files ...string)
func AggregateData
func AggregateData(newRec, baseRec *record.Record, ops []*comm.CallOption) bool
func CanEncodeOneRowMode (added in v1.2.0)
func CanEncodeOneRowMode(col *record.ColVal) bool
func CleanTempFile (added in v1.3.0)
func CleanTempFile(f fileops.File)
func CompactRecovery
func CompactRecovery(path string, group *CompactGroup)
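The (path, group) signature suggests CompactRecovery is meant to run as a deferred panic handler around a compaction. The sketch below is an assumption drawn from that signature, not a documented contract:

func compactGroup(path string, group *CompactGroup) {
    // hypothetical usage: recover and log if the compaction panics
    defer CompactRecovery(path, group)
    // ... perform the compaction for this group ...
}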
func CompareT
func CompareT[T int | int64 | float64 | string](s1, s2 T, isAscending bool) (bool, bool)
func CreateTSSPFileReader
func CreateTSSPFileReader(size int64, fd fileops.File, trailer *Trailer, tb *TableData, ver uint64, ...) (*tsspFileReader, error)
func DecodeAggTimes
func DecodeAggTimes(buf []byte) ([]byte, int64, int64, error)
func DecodeColumnHeader (added in v1.2.0)
func DecodeColumnHeader(col *record.ColVal, data []byte, colType uint8) ([]byte, []byte, error)
func DecodeColumnOfOneValue (added in v1.2.0)
func DecodeColumnOfOneValue(data []byte, col *record.ColVal, typ uint8)
func DelTSSP2ParquetProcess (added in v1.4.0)
func DelTSSP2ParquetProcess(files ...string)
func EncodeColumnHeader (added in v1.2.0)
func EncodeColumnHeader(col *record.ColVal, dst []byte, typ uint8) []byte
func FileOperation (added in v1.0.0)
func FileOperation(f TSSPFile, op func())
func FilesMergedTire (added in v1.4.0)
func FilesMergedTire(files []TSSPFile) uint64
func FillNilCol
func FillNilCol(col *record.ColVal, size int, ref *record.Field)
func FilterByField
func FilterByField(rec *record.Record, filterRec *record.Record, filterOption *BaseFilterOptions, ...) *record.Record
func FilterByFieldFuncs (added in v1.1.0)
func FilterByFieldFuncs(rec, filterRec *record.Record, filterOption *BaseFilterOptions, filterBitmap *bitmap.FilterBitmap) *record.Record
func FilterByOpts (added in v1.0.1)
func FilterByOpts(rec *record.Record, opt *FilterOptions) *record.Record
func FilterByTime
func FilterByTime(rec *record.Record, tr util.TimeRange) *record.Record
func FilterByTimeDescend
func FilterByTimeDescend(rec *record.Record, tr util.TimeRange) *record.Record
func FlushRemoteEnabled (added in v1.3.0)
func FlushRemoteEnabled(tier uint64) bool
func GenFixRowsPerSegment (added in v1.2.0)
func GenFixRowsPerSegment(data *record.Record, rowNumPerSegment int) []int
func GenLogFileName (added in v1.0.0)
func GenLogFileName(logSeq *uint64) string
func GenParquetLogName (added in v1.3.0)
func GenParquetLogName() string
func GenRecByReserveIds (added in v1.2.0)
func GenRecByReserveIds(rec, filterRec *record.Record, rowNumber []int, redIdxMap map[int]struct{}) *record.Record
func GetBloomFilterBuf (added in v1.2.0)
func GetBloomFilterBuf() *bloomFilter
func GetChunkMetaCompressMode (added in v1.2.0)
func GetChunkMetaCompressMode() uint8
func GetCursorsBy (added in v1.3.0)
func GetCursorsBy(path *sparseindex.OBSFilterPath, tr util.TimeRange, isAscending bool) (int, uint64, error)
func GetDetachedFlushEnabled (added in v1.3.0)
func GetDetachedFlushEnabled() bool
func GetDir
func GetDir(engineType config.EngineType, path string) string
func GetMaxRowsPerSegment4TsStore (added in v1.1.0)
func GetMaxRowsPerSegment4TsStore() int
func GetMergeFlag4TsStore (added in v1.1.0)
func GetMergeFlag4TsStore() int32
func GetMetaIndexChunkCount (added in v1.2.0)
func GetMetaIndexChunkCount(obsOptions *obs.ObsOptions, dataPath string) (int64, error)
func GetMetaIndexOffsetAndLengthByChunkId (added in v1.2.0)
func GetMetaIndexOffsetAndLengthByChunkId(chunkId int64) (offset, length int64)
func GetPKItems (added in v1.3.0)
func GetPKItems(path string, obsOpts *obs.ObsOptions, miChunkIds []int64) (*colstore.DetachedPKMetaInfo, []*colstore.DetachedPKInfo, error)
func GetPKMetaOffsetLengthByChunkId (added in v1.2.0)
func GetPKMetaOffsetLengthByChunkId(pkMetaInfo *colstore.DetachedPKMetaInfo, chunkId int) (offset, length int64)
func GetSortKeyColVal (added in v1.1.1)
func GetSortKeyColVal(fi *FileIterator, sortKey []record.Field, ctx *ReadContext, ...) ([]*record.ColVal, []record.SortItem, error)
func GetTmpFileSuffix (added in v1.1.0)
func GetTmpFileSuffix() string
func InParquetProcess (added in v1.3.0)
func InParquetProcess(files ...string) bool
func Init
func Init()
func InitDecFunctions
func InitDecFunctions()
func InitQueryFileCache (added in v1.1.0)
func InitQueryFileCache(cap uint32, enable bool)
func InitWriterPool (added in v1.1.0)
func InitWriterPool(size int)
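A hypothetical startup sketch combining the initializers above; the pool size and cache capacity are illustrative values, not recommendations:

InitDecFunctions()             // register decode functions once at startup
InitWriterPool(8)              // hypothetical writer-pool size
InitQueryFileCache(1024, true) // hypothetical cache capacity; enable the cache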
func IsChunkMetaCompressSelf (added in v1.4.0)
func IsChunkMetaCompressSelf() bool
func IsFlushToFinalFile (added in v1.2.0)
func IsFlushToFinalFile(totalSegmentCnt, flushToFinalFileLimit uint64) bool
func IsInterfaceNil
func IsInterfaceNil(value interface{}) bool
func IsTempleFile
func IsTempleFile(name string) bool
func LeftBound
func LeftBound(nums []uint32, target uint32, left int) int
func MarshalChunkMeta (added in v1.4.0)
func MarshalChunkMeta(ctx *ChunkMetaCodecCtx, cm *ChunkMeta, dst []byte) ([]byte, error)
func MarshalColumnMeta (added in v1.4.0)
func MarshalColumnMeta(ctx *ChunkMetaCodecCtx, col *ColumnMeta, dst []byte) []byte
func MarshalTimeRange (added in v1.4.0)
func MarshalTimeRange(ctx *ChunkMetaCodecCtx, sr []SegmentRange, dst []byte) []byte
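A hedged round-trip sketch built only from the signatures in this package, assuming dst/buf follow the usual append-and-consume convention and that GetChunkMetaCodecCtx and Release (see the ChunkMetaCodecCtx type below) manage the codec context:

ctx := GetChunkMetaCodecCtx()
defer ctx.Release()

buf, err := MarshalChunkMeta(ctx, cm, nil) // cm is a *ChunkMeta obtained elsewhere
if err != nil {
    return err
}
decoded := &ChunkMeta{}
if _, err := UnmarshalChunkMeta(ctx, decoded, buf); err != nil {
    return err
}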
func MergeRecovery
func MergeRecovery(path string, name string, ctx *MergeContext)
func MergeTimes
func MergeTimes(a []int64, b []int64, dst []int64) []int64
func NewCsImmTableImpl (added in v1.2.0)
func NewCsImmTableImpl() *csImmTableImpl
func NewFileLoadContext (added in v1.4.0)
func NewFileLoadContext() *fileLoadContext
func NewLastMergeTime (added in v1.0.0)
func NewLastMergeTime() *lastMergeTime
func NewMergePerformer (added in v1.0.0)
func NewMergePerformer(ur *UnorderedReader, stat *statistics.MergeStatItem) *mergePerformer
func NewObsWriter (added in v1.2.0)
func NewObsWriter(path, fileName string, obsOpts *obs.ObsOptions) (*obsWriter, error)
func NewObsWriterByFd (added in v1.2.0)
func NewObsWriterByFd(fd fileops.File, obsOpts *obs.ObsOptions) (*obsWriter, error)
func NewTSSPFileReader
func NewTSSPFileReader(name string, lockPath *string) (*tsspFileReader, error)
func NewTsImmTable (added in v1.2.0)
func NewTsImmTable() *tsImmTableImpl
func NonStreamingCompaction
func NonStreamingCompaction(fi FilesInfo) bool
func PreAggOnlyOneRow (added in v1.2.0)
func PreAggOnlyOneRow(buf []byte) bool
func ProcParquetLog (added in v1.3.0)
func ProcParquetLog(logDir string, lockPath *string, ctx *EventContext) error
func PutBloomFilterBuf (added in v1.2.0)
func PutBloomFilterBuf(key *bloomFilter)
func PutChunkMeta (added in v1.2.0)
func PutChunkMeta(filePath string, chunkMeta *ChunkMeta)
func PutDetachedSegmentTask (added in v1.2.0)
func PutDetachedSegmentTask(queryID string, meta IndexFrags)
func PutIDTimePairs
func PutIDTimePairs(pair *IdTimePairs)
func ReadPKDataAll (added in v1.3.0)
func ReadPKDataAll(path string, opts *obs.ObsOptions, offset, length []int64, meta []*colstore.DetachedPKMeta, info *colstore.DetachedPKMetaInfo) ([]*colstore.DetachedPKData, error)
func ReadPKMetaAll (added in v1.3.0)
func ReadPKMetaAll(path string, opts *obs.ObsOptions, offset, length []int64) ([]*colstore.DetachedPKMeta, error)
func ReadPKMetaInfoAll (added in v1.3.0)
func ReadPKMetaInfoAll(path string, opts *obs.ObsOptions) (*colstore.DetachedPKMetaInfo, error)
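The ReadPK* helpers compose naturally: metadata info first, then per-chunk offsets, then the meta blocks. This sketch is assembled from the signatures alone; chunkCount and the error handling are assumptions:

info, err := ReadPKMetaInfoAll(path, opts)
if err != nil {
    return err
}
var offsets, lengths []int64
for id := 0; id < chunkCount; id++ { // chunkCount is a hypothetical number of PK chunks
    off, length := GetPKMetaOffsetLengthByChunkId(info, id)
    offsets = append(offsets, off)
    lengths = append(lengths, length)
}
metas, err := ReadPKMetaAll(path, opts, offsets, lengths)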
func ReadReliabilityLog (added in v1.3.0)
func ReadReliabilityLog(file string, dst interface{}) error
func RefFilesReader (added in v1.4.0)
func RefFilesReader(files ...TSSPFile)
func RegistryDDLRespData (added in v1.4.0)
func RegistryDDLRespData(ddl hybridqp.DDLType, f GenDDLRespDataFunc)
func RegistryDDLSequenceHandler (added in v1.4.0)
func RegistryDDLSequenceHandler(ddl hybridqp.DDLType, f GenSequenceHandlerFunc)
func ReleaseColumnBuilder
func ReleaseColumnBuilder(b PreAggBuilder)
func ReloadSpecifiedFiles (added in v1.4.0)
func ReloadSpecifiedFiles(m *MmsTables, mst string, tsspFiles *TSSPFiles)
func RemoveTsspSuffix (added in v1.1.0)
func RemoveTsspSuffix(dataPath string) string
func RenameIndexFiles (added in v1.2.0)
func RenameIndexFiles(fname string, indexList []string) error
func RenameTmpFiles
func RenameTmpFiles(newFiles []TSSPFile) error
func RenameTmpFilesWithPKIndex (added in v1.1.0)
func RenameTmpFilesWithPKIndex(newFiles []TSSPFile, ir *influxql.IndexRelation) error
func RenameTmpFullTextIdxFile (added in v1.2.0)
func RenameTmpFullTextIdxFile(msb *MsBuilder) error
func ResetAggregateData
func ResetAggregateData(newRec *record.Record, ops []*comm.CallOption)
func ResetQueryFileCache (added in v1.4.0)
func ResetQueryFileCache()
ResetQueryFileCache resets the query file cache; it is intended for unit tests.
func SaveReliabilityLog (added in v1.3.0)
func SaveReliabilityLog(data interface{}, dir string, lockFile string, nameGenerator func() string) (string, error)
func SearchChunkMetaBlock (added in v1.4.0)
func SearchChunkMetaBlock(data []byte, itemCount uint32, sid uint64) []byte
func SetChunkMetaCompressMode (added in v1.2.0)
func SetChunkMetaCompressMode(mode int)
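A small configuration sketch using the ChunkMetaCompress* constants above; calling the setter once during startup is an assumption about intended usage:

SetChunkMetaCompressMode(ChunkMetaCompressSnappy) // choose Snappy for chunk-meta blocks
if GetChunkMetaCompressMode() == ChunkMetaCompressSnappy {
    // newly written TSSP files will carry Snappy-compressed chunk metadata
}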
func SetCompactLimit
func SetCompactLimit(bytesPerSec int64, burstLimit int64)
func SetCompactionEnabled (added in v1.2.0)
func SetCompactionEnabled(compactionEnabled bool)
func SetDetachedFlushEnabled (added in v1.2.0)
func SetDetachedFlushEnabled(detachFlushEnabled bool)
func SetFragmentsNumPerFlush (added in v1.1.1)
func SetFragmentsNumPerFlush(fragmentsNumPerFlush int)
func SetIndexCompressMode (added in v1.2.0)
func SetIndexCompressMode(mode int)
func SetMaxCompactor
func SetMaxCompactor(n int)
func SetMaxFullCompactor
func SetMaxFullCompactor(n int)
func SetMaxRowsPerSegment4TsStore (added in v1.1.0)
func SetMaxRowsPerSegment4TsStore(maxRowsPerSegmentLimit int)
func SetMaxSegmentLimit4TsStore (added in v1.1.0)
func SetMaxSegmentLimit4TsStore(limit int)
func SetMergeFlag4TsStore (added in v1.1.0)
func SetMergeFlag4TsStore(v int32)
func SetSnapshotLimit
func SetSnapshotLimit(bytesPerSec int64, burstLimit int64)
func SetSnapshotTblNum (added in v1.1.0)
func SetSnapshotTblNum(snapshotTblNum int)
func SnapshotLimit
func SnapshotLimit() bool
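The parameter names make the throttling setters self-describing; the numbers below are illustrative only:

SetCompactLimit(64*1024*1024, 128*1024*1024)  // compaction: 64 MB/s, 128 MB burst
SetSnapshotLimit(32*1024*1024, 64*1024*1024)  // snapshot flush: 32 MB/s, 64 MB burst
if SnapshotLimit() {
    // snapshot writes are rate-limited
}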
func SumFilesSize (added in v1.0.0)
func SumFilesSize(files []TSSPFile) int64
func TimeSorted (added in v1.2.0)
func TimeSorted(sortKeys []string) bool
func UnmarshalChunkMeta (added in v1.4.0)
func UnmarshalChunkMeta(ctx *ChunkMetaCodecCtx, cm *ChunkMeta, buf []byte) ([]byte, error)
func UnmarshalChunkMetaAdaptive (added in v1.4.0)
func UnmarshalChunkMetaAdaptive(ctx *ChunkMetaCodecCtx, cm *ChunkMeta, columns []string, buf []byte) ([]byte, error)
func UnmarshalChunkMetaBaseAttr (added in v1.4.0)
func UnmarshalChunkMetaBaseAttr(ctx *ChunkMetaCodecCtx, cm *ChunkMeta, buf []byte) ([]byte, error)
func UnmarshalChunkMetaWithColumns (added in v1.4.0)
func UnmarshalChunkMetaWithColumns(ctx *ChunkMetaCodecCtx, cm *ChunkMeta, columns []string, buf []byte) ([]byte, error)
func UnmarshalColumnMeta (added in v1.4.0)
func UnmarshalColumnMeta(ctx *ChunkMetaCodecCtx, segCount int, col *ColumnMeta, buf []byte) ([]byte, error)
func UnmarshalColumnMetaWithoutName (added in v1.4.0)
func UnmarshalColumnMetaWithoutName(ctx *ChunkMetaCodecCtx, segCount int, col *ColumnMeta, buf []byte) ([]byte, error)
func UnmarshalColumnName (added in v1.4.0)
func UnmarshalColumnName(ctx *ChunkMetaCodecCtx, buf []byte) ([]byte, string)
func UnmarshalTimeRange (added in v1.4.0)
func UnmarshalTimeRange(ctx *ChunkMetaCodecCtx, cm *ChunkMeta, buf []byte) ([]byte, bool)
func UnrefFiles
func UnrefFiles(files ...TSSPFile)
func UnrefFilesReader (added in v1.0.0)
func UnrefFilesReader(files ...TSSPFile)
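RefFilesReader and UnrefFilesReader look like a paired reference count around reads; pairing them with defer is an assumption drawn from the naming:

RefFilesReader(files...)         // pin the readers of these TSSP files
defer UnrefFilesReader(files...) // release when the read completes
// ... read segments from files ...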
func UpdateChunkMetaFunc (added in v1.2.0)
func UpdateChunkMetaFunc(_, _ cache.Entry) bool
func UpdateDetachedMetaDataCache (added in v1.2.0)
func UpdateDetachedMetaDataCache(old, new cache.Entry) bool
func UseIndexCompressWriter (added in v1.4.0)
func UseIndexCompressWriter() bool
func WriteIntoFile (added in v1.1.1)
func WriteIntoFile(msb *MsBuilder, tmp bool, withPKIndex bool, ir *influxql.IndexRelation) error
Types
type AccumulateMetaIndex (added in v1.2.0)
type AccumulateMetaIndex struct {
    // contains filtered or unexported fields
}
func (*AccumulateMetaIndex) GetBlockId (added in v1.2.0)
func (a *AccumulateMetaIndex) GetBlockId() uint64
func (*AccumulateMetaIndex) SetAccumulateMetaIndex (added in v1.2.0)
func (a *AccumulateMetaIndex) SetAccumulateMetaIndex(pkDataOffset uint32, blockId uint64, dataOffset, offset int64)
type BaseFilterOptions (added in v1.1.0)
type BaseFilterOptions struct {
    FiltersMap    influxql.FilterMapValuer
    RedIdxMap     map[int]struct{} // redundant columns, which are not required after filtering
    FieldsIdx     []int            // field index in schema
    FilterTags    []string         // filter tag name
    CondFunctions *binaryfilterfunc.ConditionImpl
}
type BloomFilterIterator (added in v1.2.0)
type BloomFilterIterator struct {
    // contains filtered or unexported fields
}
func NewBloomFilterIterator (added in v1.2.0)
func NewBloomFilterIterator(f *FragmentIterators, oldFiles []TSSPFile, bfCols []string) (*BloomFilterIterator, error)
func (*BloomFilterIterator) AppendFileIdx (added in v1.2.0)
func (bfi *BloomFilterIterator) AppendFileIdx(fileIdx int)
func (*BloomFilterIterator) Write (added in v1.2.0)
func (bfi *BloomFilterIterator) Write(toLocal bool) error
type BooleanPreAgg
type BooleanPreAgg struct {
    // contains filtered or unexported fields
}
func NewBooleanPreAgg
func NewBooleanPreAgg() *BooleanPreAgg
type BufferReader (added in v1.0.0)
type BufferReader struct {
    // contains filtered or unexported fields
}
func NewBufferReader (added in v1.0.0)
func NewBufferReader(maxSize uint32) *BufferReader
func (*BufferReader) Read (added in v1.0.0)
func (br *BufferReader) Read(offset int64, size uint32) ([]byte, error)
func (*BufferReader) Reset (added in v1.0.0)
func (br *BufferReader) Reset(r TSSPFile)
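A usage sketch inferred from the constructor and method signatures; the buffer size, read offset, and length are arbitrary:

br := NewBufferReader(64 * 1024) // 64 KB buffer
br.Reset(file)                   // file is a TSSPFile opened elsewhere
data, err := br.Read(0, 512)     // read 512 bytes starting at offset 0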
type CSParquetManager (added in v1.4.0)
type CSParquetManager struct {
    // contains filtered or unexported fields
}
func NewCSParquetManager (added in v1.4.0)
func NewCSParquetManager() *CSParquetManager
func (*CSParquetManager) Convert (added in v1.4.0)
func (m *CSParquetManager) Convert(files []TSSPFile, db string, rp string, mst string)
func (*CSParquetManager) Recover (added in v1.4.0)
func (m *CSParquetManager) Recover()
func (*CSParquetManager) Run (added in v1.4.0)
func (m *CSParquetManager) Run()
func (*CSParquetManager) Stop (added in v1.4.0)
func (m *CSParquetManager) Stop()
func (*CSParquetManager) Wait (added in v1.4.0)
func (m *CSParquetManager) Wait()
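The method set suggests a run/convert/wait/stop lifecycle; the ordering below, and the database, retention-policy, and measurement names, are assumptions rather than documented behavior:

mgr := NewCSParquetManager()
mgr.Run()                                // start background conversion workers
mgr.Convert(files, "db0", "rp0", "mst0") // hypothetical db/rp/measurement names
mgr.Wait()                               // wait for queued conversions to finish
mgr.Stop()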
type CSParquetPlan ¶ added in v1.4.0
type CSParquetPlan struct { Mst string Id uint64 DstFile string TSSPFile string // contains filtered or unexported fields }
func (*CSParquetPlan) BeforeRun ¶ added in v1.4.0
func (p *CSParquetPlan) BeforeRun() error
func (*CSParquetPlan) IterRecord ¶ added in v1.4.0
func (p *CSParquetPlan) IterRecord(handler func(*record.Record) error) error
func (*CSParquetPlan) SetLogFile ¶ added in v1.4.0
func (p *CSParquetPlan) SetLogFile(file string)
type CSParquetTask ¶ added in v1.4.0
func (*CSParquetTask) Execute ¶ added in v1.4.0
func (t *CSParquetTask) Execute()
func (*CSParquetTask) LockFiles ¶ added in v1.4.0
func (t *CSParquetTask) LockFiles()
func (*CSParquetTask) Stop ¶ added in v1.4.0
func (t *CSParquetTask) Stop()
func (*CSParquetTask) UnLockFiles ¶ added in v1.4.0
func (t *CSParquetTask) UnLockFiles()
type ChunkDataBuilder ¶
type ChunkDataBuilder struct {
// contains filtered or unexported fields
}
func NewChunkDataBuilder ¶
func NewChunkDataBuilder(maxRowsPerSegment, maxSegmentLimit int) *ChunkDataBuilder
func (*ChunkDataBuilder) EncodeTime ¶
func (b *ChunkDataBuilder) EncodeTime(offset int64, timeSorted bool) error
type ChunkIterator ¶
type ChunkIterator struct {
    *FileIterator
    // contains filtered or unexported fields
}
func NewChunkIterator ¶
func NewChunkIterator(r *FileIterator) *ChunkIterator
func (*ChunkIterator) Close ¶
func (c *ChunkIterator) Close()
func (*ChunkIterator) GetRecord ¶ added in v1.0.0
func (c *ChunkIterator) GetRecord() *record.Record
func (*ChunkIterator) GetSeriesID ¶ added in v1.0.0
func (c *ChunkIterator) GetSeriesID() uint64
func (*ChunkIterator) IncrChunkUsed ¶ added in v1.4.0
func (c *ChunkIterator) IncrChunkUsed()
func (*ChunkIterator) Next ¶
func (c *ChunkIterator) Next() bool
func (*ChunkIterator) WithLog ¶
func (c *ChunkIterator) WithLog(log *Log.Logger)
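A minimal in-package sketch of scanning a TSSP file chunk by chunk with ChunkIterator, assuming f and log are supplied by the caller:

    func exampleChunkScan(f TSSPFile, log *Log.Logger) {
        itr := NewChunkIterator(NewFileIterator(f, log))
        defer itr.Close()
        for itr.Next() {
            sid := itr.GetSeriesID() // series the iterator is positioned on
            rec := itr.GetRecord()   // decoded record for that series
            _, _ = sid, rec
        }
    }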
type ChunkIterators ¶
type ChunkIterators struct {
// contains filtered or unexported fields
}
func (*ChunkIterators) Close ¶
func (c *ChunkIterators) Close()
func (*ChunkIterators) Len ¶
func (c *ChunkIterators) Len() int
func (*ChunkIterators) Less ¶
func (c *ChunkIterators) Less(i, j int) bool
func (*ChunkIterators) Pop ¶
func (c *ChunkIterators) Pop() interface{}
func (*ChunkIterators) Push ¶
func (c *ChunkIterators) Push(v interface{})
func (*ChunkIterators) Swap ¶
func (c *ChunkIterators) Swap(i, j int)
func (*ChunkIterators) WithLog ¶
func (c *ChunkIterators) WithLog(log *Log.Logger)
type ChunkMeta ¶
type ChunkMeta struct {
// contains filtered or unexported fields
}
func GetChunkMeta ¶ added in v1.2.0
func NewChunkMeta ¶ added in v1.3.0
Used for tests only.
func (*ChunkMeta) AllocColMeta ¶ added in v1.1.0
func (m *ChunkMeta) AllocColMeta(ref *record.Field) *ColumnMeta
func (*ChunkMeta) DelEmptyColMeta ¶ added in v1.1.0
func (m *ChunkMeta) DelEmptyColMeta()
func (*ChunkMeta) GetColMeta ¶ added in v1.0.0
func (m *ChunkMeta) GetColMeta() []ColumnMeta
func (*ChunkMeta) GetTimeRangeBy ¶ added in v1.3.0
func (m *ChunkMeta) GetTimeRangeBy(index int) SegmentRange
func (*ChunkMeta) MinMaxTime ¶
func (*ChunkMeta) Rows ¶
func (m *ChunkMeta) Rows(ab PreAggBuilder) int
func (*ChunkMeta) SegmentCount ¶ added in v1.0.0
func (*ChunkMeta) TimeMeta ¶
func (m *ChunkMeta) TimeMeta() *ColumnMeta
func (*ChunkMeta) UnmarshalWithColumns ¶ added in v1.2.0
func (*ChunkMeta) Validation ¶ added in v1.4.0
func (m *ChunkMeta) Validation()
type ChunkMetaCodecCtx ¶ added in v1.4.0
type ChunkMetaCodecCtx struct {
// contains filtered or unexported fields
}
func GetChunkMetaCodecCtx ¶ added in v1.4.0
func GetChunkMetaCodecCtx() *ChunkMetaCodecCtx
func (*ChunkMetaCodecCtx) GetHeader ¶ added in v1.4.0
func (ctx *ChunkMetaCodecCtx) GetHeader() *ChunkMetaHeader
func (*ChunkMetaCodecCtx) GetIndex ¶ added in v1.4.0
func (ctx *ChunkMetaCodecCtx) GetIndex(val string) uint64
func (*ChunkMetaCodecCtx) GetValue ¶ added in v1.4.0
func (ctx *ChunkMetaCodecCtx) GetValue(idx int) string
func (*ChunkMetaCodecCtx) MemSize ¶ added in v1.4.0
func (ctx *ChunkMetaCodecCtx) MemSize() int
func (*ChunkMetaCodecCtx) Release ¶ added in v1.4.0
func (ctx *ChunkMetaCodecCtx) Release()
func (*ChunkMetaCodecCtx) SetTrailer ¶ added in v1.4.0
func (ctx *ChunkMetaCodecCtx) SetTrailer(trailer *Trailer)
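The Get/Release pairing suggests the codec context is pooled; a minimal hedged sketch:

    func exampleCodecCtx() {
        ctx := GetChunkMetaCodecCtx() // presumably fetched from a pool
        defer ctx.Release()           // return it when done
        header := ctx.GetHeader()
        _ = header.Len()
    }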
type ChunkMetaContext ¶ added in v1.2.0
type ChunkMetaContext struct {
// contains filtered or unexported fields
}
func NewChunkMetaContext ¶ added in v1.2.0
func NewChunkMetaContext(schema record.Schemas) *ChunkMetaContext
func (*ChunkMetaContext) CodecCtx ¶ added in v1.4.0
func (ctx *ChunkMetaContext) CodecCtx() *ChunkMetaCodecCtx
func (*ChunkMetaContext) MemSize ¶ added in v1.2.0
func (ctx *ChunkMetaContext) MemSize() int
func (*ChunkMetaContext) Release ¶ added in v1.2.0
func (ctx *ChunkMetaContext) Release()
type ChunkMetaEntry ¶ added in v1.2.0
type ChunkMetaEntry struct {
// contains filtered or unexported fields
}
func NewChunkMetaEntry ¶ added in v1.2.0
func NewChunkMetaEntry(filePath string) *ChunkMetaEntry
func (*ChunkMetaEntry) GetKey ¶ added in v1.2.0
func (e *ChunkMetaEntry) GetKey() string
func (*ChunkMetaEntry) GetTime ¶ added in v1.2.0
func (e *ChunkMetaEntry) GetTime() time.Time
func (*ChunkMetaEntry) GetValue ¶ added in v1.2.0
func (e *ChunkMetaEntry) GetValue() interface{}
func (*ChunkMetaEntry) SetTime ¶ added in v1.2.0
func (e *ChunkMetaEntry) SetTime(time time.Time)
func (*ChunkMetaEntry) SetValue ¶ added in v1.2.0
func (e *ChunkMetaEntry) SetValue(value interface{})
func (*ChunkMetaEntry) Size ¶ added in v1.2.0
func (e *ChunkMetaEntry) Size() int64
type ChunkMetaHeader ¶ added in v1.4.0
type ChunkMetaHeader struct {
// contains filtered or unexported fields
}
func (*ChunkMetaHeader) AppendValue ¶ added in v1.4.0
func (h *ChunkMetaHeader) AppendValue(val string)
func (*ChunkMetaHeader) CopyTo ¶ added in v1.4.0
func (h *ChunkMetaHeader) CopyTo(dst *ChunkMetaHeader)
func (*ChunkMetaHeader) GetValue ¶ added in v1.4.0
func (h *ChunkMetaHeader) GetValue(idx int) string
func (*ChunkMetaHeader) Len ¶ added in v1.4.0
func (h *ChunkMetaHeader) Len() int
func (*ChunkMetaHeader) Marshal ¶ added in v1.4.0
func (h *ChunkMetaHeader) Marshal(dst []byte) []byte
func (*ChunkMetaHeader) Reset ¶ added in v1.4.0
func (h *ChunkMetaHeader) Reset()
func (*ChunkMetaHeader) Unmarshal ¶ added in v1.4.0
func (h *ChunkMetaHeader) Unmarshal(buf []byte)
type ColumnBuilder ¶
type ColumnBuilder struct {
// contains filtered or unexported fields
}
func NewColumnBuilder ¶
func NewColumnBuilder() *ColumnBuilder
func (*ColumnBuilder) BuildPreAgg ¶ added in v1.0.0
func (b *ColumnBuilder) BuildPreAgg()
func (*ColumnBuilder) EncodeColumn ¶
func (*ColumnBuilder) EncodeColumnBySize ¶ added in v1.2.0
func (*ColumnBuilder) SetEncodeMode ¶ added in v1.2.0
func (b *ColumnBuilder) SetEncodeMode(detached bool)
type ColumnIterator ¶ added in v1.0.0
type ColumnIterator struct {
// contains filtered or unexported fields
}
func NewColumnIterator ¶ added in v1.0.0
func NewColumnIterator(fi *FileIterator) *ColumnIterator
func (*ColumnIterator) Close ¶ added in v1.0.0
func (itr *ColumnIterator) Close()
func (*ColumnIterator) Error ¶ added in v1.0.0
func (itr *ColumnIterator) Error() error
func (*ColumnIterator) IncrChunkUsed ¶ added in v1.0.0
func (itr *ColumnIterator) IncrChunkUsed()
func (*ColumnIterator) IterCurrentChunk ¶ added in v1.3.0
func (itr *ColumnIterator) IterCurrentChunk(p ColumnIteratorPerformer) error
func (*ColumnIterator) NextChunkMeta ¶ added in v1.0.0
func (itr *ColumnIterator) NextChunkMeta() bool
func (*ColumnIterator) NextColumn ¶ added in v1.0.0
func (itr *ColumnIterator) NextColumn(colIdx int) (*record.Field, bool)
func (*ColumnIterator) PutCol ¶ added in v1.0.0
func (itr *ColumnIterator) PutCol(col *record.ColVal)
func (*ColumnIterator) Run ¶ added in v1.0.0
func (itr *ColumnIterator) Run(p ColumnIteratorPerformer) error
type ColumnIteratorPerformer ¶ added in v1.0.0
type ColumnMeta ¶
type ColumnMeta struct {
// contains filtered or unexported fields
}
func (*ColumnMeta) Clone ¶ added in v1.0.0
func (m *ColumnMeta) Clone() ColumnMeta
func (*ColumnMeta) GetPreAgg ¶ added in v1.2.0
func (m *ColumnMeta) GetPreAgg() []byte
func (*ColumnMeta) GetSegment ¶ added in v1.2.0
func (m *ColumnMeta) GetSegment(i int) (int64, uint32)
func (*ColumnMeta) IsTime ¶ added in v1.1.0
func (m *ColumnMeta) IsTime() bool
func (*ColumnMeta) Name ¶ added in v1.1.0
func (m *ColumnMeta) Name() string
func (*ColumnMeta) RowCount ¶
func (m *ColumnMeta) RowCount(ref *record.Field, decs *ReadContext) (int64, error)
func (*ColumnMeta) Type ¶ added in v1.2.0
func (m *ColumnMeta) Type() uint8
type ColumnReader ¶
type ColumnReader interface {
    ReadDataBlock(offset int64, size uint32, dst *[]byte, ioPriority int) ([]byte, *readcache.CachePage, error)
    ReadMetaBlock(metaIdx int, id uint64, offset int64, size uint32, count uint32, dst *pool.Buffer, ioPriority int) ([]byte, *readcache.CachePage, error)
    UnrefCachePage(cachePage *readcache.CachePage)
}
type CompactGroup ¶
type CompactGroup struct {
// contains filtered or unexported fields
}
func NewCompactGroup ¶
func NewCompactGroup(name string, toLevle uint16, count int) *CompactGroup
func (*CompactGroup) Add ¶ added in v1.3.0
func (g *CompactGroup) Add(item string)
func (*CompactGroup) Len ¶ added in v1.3.0
func (g *CompactGroup) Len() int
func (*CompactGroup) UpdateLevel ¶ added in v1.3.0
func (g *CompactGroup) UpdateLevel(lv uint16)
type CompactGroupBuilder ¶ added in v1.3.0
type CompactGroupBuilder struct {
// contains filtered or unexported fields
}
func (*CompactGroupBuilder) AddFile ¶ added in v1.3.0
func (b *CompactGroupBuilder) AddFile(f TSSPFile) bool
func (*CompactGroupBuilder) Init ¶ added in v1.3.0
func (b *CompactGroupBuilder) Init(name string, closing *int64, size int)
func (*CompactGroupBuilder) Limited ¶ added in v1.3.0
func (b *CompactGroupBuilder) Limited() bool
func (*CompactGroupBuilder) Release ¶ added in v1.3.0
func (b *CompactGroupBuilder) Release()
func (*CompactGroupBuilder) SwitchGroup ¶ added in v1.3.0
func (b *CompactGroupBuilder) SwitchGroup()
type CompactTask ¶ added in v1.3.0
func NewCompactTask ¶ added in v1.3.0
func NewCompactTask(table *MmsTables, plan *CompactGroup, full bool) *CompactTask
func (*CompactTask) BeforeExecute ¶ added in v1.3.0
func (t *CompactTask) BeforeExecute() bool
func (*CompactTask) Execute ¶ added in v1.3.0
func (t *CompactTask) Execute()
func (*CompactTask) Finish ¶ added in v1.3.0
func (t *CompactTask) Finish()
func (*CompactTask) IncrFull ¶ added in v1.3.0
func (t *CompactTask) IncrFull(n int64)
type CompactedFileInfo ¶
type Config ¶
type Config struct {
    SnapshotTblNum       int
    FragmentsNumPerFlush int
    // contains filtered or unexported fields
}
func GetColStoreConfig ¶ added in v1.1.0
func GetColStoreConfig() *Config
func GetTsStoreConfig ¶ added in v1.1.0
func GetTsStoreConfig() *Config
func NewColumnStoreConfig ¶ added in v1.1.1
func NewColumnStoreConfig() *Config
func NewTsStoreConfig ¶ added in v1.1.0
func NewTsStoreConfig() *Config
func (*Config) GetCompactionEnabled ¶ added in v1.2.0
func (*Config) GetMaxRowsPerSegment ¶ added in v1.1.0
func (*Config) GetMaxSegmentLimit ¶ added in v1.1.0
func (*Config) SetExpectedSegmentSize ¶ added in v1.2.0
func (*Config) SetFilesLimit ¶
func (*Config) SetMaxRowsPerSegment ¶
func (*Config) SetMaxSegmentLimit ¶
type CsChunkDataImp ¶ added in v1.2.0
type CsChunkDataImp struct {
// contains filtered or unexported fields
}
func (*CsChunkDataImp) EncodeChunk ¶ added in v1.2.0
func (c *CsChunkDataImp) EncodeChunk(b *ChunkDataBuilder, id uint64, offset int64, rec *record.Record, dst []byte, timeSorted bool) ([]byte, error)
func (*CsChunkDataImp) EncodeChunkForCompaction ¶ added in v1.2.0
func (c *CsChunkDataImp) EncodeChunkForCompaction(b *ChunkDataBuilder, offset int64, rec *record.Record, dst []byte, accumulateRowsIndex []int, timeSorted bool) ([]byte, error)
func (*CsChunkDataImp) SetAccumulateRowsIndex ¶ added in v1.2.0
func (c *CsChunkDataImp) SetAccumulateRowsIndex(accumulateRowsIndex []int)
func (*CsChunkDataImp) SetDetachedInfo ¶ added in v1.2.0
func (c *CsChunkDataImp) SetDetachedInfo(writeDetached bool)
type DDLBasePlan ¶ added in v1.4.0
type DDLBasePlan interface {
    Execute(dst map[string]DDLRespData, mstKeys map[string][][]byte, condition influxql.Expr, tr util.TimeRange, limit int) error
    Stop()
}
DDLBasePlan is an abstraction of the DDL base plan.
func NewDDLBasePlan ¶ added in v1.4.0
func NewDDLBasePlan(table TablesStore, idx IndexMergeSet, logger *Log.Logger, sh EngineShard, client metaclient.MetaClient, ddl hybridqp.DDLType) DDLBasePlan
type DDLRespData ¶ added in v1.4.0
type DDLRespData interface {
    Add(key, value string)
    ForEach(process func(key, value string))
    Count() int
}
DDLRespData is an abstraction of the DDL response data.
func NewSeriesKeys ¶ added in v1.4.0
func NewSeriesKeys() DDLRespData
func NewTagSets ¶ added in v1.3.0
func NewTagSets() DDLRespData
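A minimal sketch of collecting and iterating DDL response data through the interface; the key/value strings are placeholders:

    func exampleRespData() int {
        resp := NewTagSets() // DDLRespData backed by tag sets
        resp.Add("region", "east")
        resp.ForEach(func(key, value string) {
            _ = key + "=" + value // process each entry
        })
        return resp.Count()
    }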
type DetachedChunkMetaReader ¶ added in v1.2.0
type DetachedChunkMetaReader struct {
// contains filtered or unexported fields
}
func NewDetachedChunkMetaReader ¶ added in v1.2.0
func NewDetachedChunkMetaReader(path string, obsOpts *obs.ObsOptions) (*DetachedChunkMetaReader, error)
func (*DetachedChunkMetaReader) ReadChunkMeta ¶ added in v1.2.0
func (reader *DetachedChunkMetaReader) ReadChunkMeta(offset, length []int64) ([]*ChunkMeta, error)
type DetachedMetaDataReader ¶ added in v1.2.0
type DetachedMetaDataReader struct {
// contains filtered or unexported fields
}
func NewDetachedMetaDataReader ¶ added in v1.2.0
func NewDetachedMetaDataReader(path string, obsOpts *obs.ObsOptions, isSort bool) (*DetachedMetaDataReader, error)
func (*DetachedMetaDataReader) Close ¶ added in v1.3.0
func (reader *DetachedMetaDataReader) Close()
func (*DetachedMetaDataReader) InitReadBatch ¶ added in v1.2.0
func (reader *DetachedMetaDataReader) InitReadBatch(s []*SegmentMeta, schema record.Schemas)
func (*DetachedMetaDataReader) ReadBatch ¶ added in v1.2.0
func (reader *DetachedMetaDataReader) ReadBatch(dst *record.Record, decs *ReadContext) (*record.Record, error)
type DetachedMetaIndexReader ¶ added in v1.2.0
type DetachedMetaIndexReader struct {
// contains filtered or unexported fields
}
func NewDetachedMetaIndexReader ¶ added in v1.2.0
func NewDetachedMetaIndexReader(path string, obsOpts *obs.ObsOptions) (*DetachedMetaIndexReader, error)
func (*DetachedMetaIndexReader) Close ¶ added in v1.3.0
func (reader *DetachedMetaIndexReader) Close()
func (*DetachedMetaIndexReader) ReadMetaIndex ¶ added in v1.2.0
func (reader *DetachedMetaIndexReader) ReadMetaIndex(offset, length []int64) ([]*MetaIndex, error)
type DetachedPKDataReader ¶ added in v1.2.0
type DetachedPKDataReader struct {
// contains filtered or unexported fields
}
func NewDetachedPKDataReader ¶ added in v1.2.0
func NewDetachedPKDataReader(path string, opts *obs.ObsOptions) (*DetachedPKDataReader, error)
func (*DetachedPKDataReader) Close ¶ added in v1.3.0
func (reader *DetachedPKDataReader) Close()
func (*DetachedPKDataReader) Read ¶ added in v1.2.0
func (reader *DetachedPKDataReader) Read(offset, length []int64, metas []*colstore.DetachedPKMeta, info *colstore.DetachedPKMetaInfo) ([]*colstore.DetachedPKData, error)
type DetachedPKMetaInfoReader ¶ added in v1.2.0
type DetachedPKMetaInfoReader struct {
// contains filtered or unexported fields
}
func NewDetachedPKMetaInfoReader ¶ added in v1.2.0
func NewDetachedPKMetaInfoReader(path string, opts *obs.ObsOptions) (*DetachedPKMetaInfoReader, error)
func (*DetachedPKMetaInfoReader) Close ¶ added in v1.3.0
func (reader *DetachedPKMetaInfoReader) Close()
func (*DetachedPKMetaInfoReader) Read ¶ added in v1.2.0
func (reader *DetachedPKMetaInfoReader) Read() (*colstore.DetachedPKMetaInfo, error)
type DetachedPKMetaReader ¶ added in v1.2.0
type DetachedPKMetaReader struct {
// contains filtered or unexported fields
}
func NewDetachedPKMetaReader ¶ added in v1.2.0
func NewDetachedPKMetaReader(path string, opts *obs.ObsOptions) (*DetachedPKMetaReader, error)
func (*DetachedPKMetaReader) Close ¶ added in v1.3.0
func (reader *DetachedPKMetaReader) Close()
func (*DetachedPKMetaReader) Read ¶ added in v1.2.0
func (reader *DetachedPKMetaReader) Read(offset, length []int64) ([]*colstore.DetachedPKMeta, error)
type DetachedSegmentEntry ¶ added in v1.2.0
type DetachedSegmentEntry struct {
// contains filtered or unexported fields
}
func NewSegmentMetaDataEntry ¶ added in v1.2.0
func NewSegmentMetaDataEntry(segmentID string) *DetachedSegmentEntry
func (*DetachedSegmentEntry) GetKey ¶ added in v1.2.0
func (e *DetachedSegmentEntry) GetKey() string
func (*DetachedSegmentEntry) GetTime ¶ added in v1.2.0
func (e *DetachedSegmentEntry) GetTime() time.Time
func (*DetachedSegmentEntry) GetValue ¶ added in v1.2.0
func (e *DetachedSegmentEntry) GetValue() interface{}
func (*DetachedSegmentEntry) SetTime ¶ added in v1.2.0
func (e *DetachedSegmentEntry) SetTime(time time.Time)
func (*DetachedSegmentEntry) SetValue ¶ added in v1.2.0
func (e *DetachedSegmentEntry) SetValue(value interface{})
func (*DetachedSegmentEntry) Size ¶ added in v1.2.0
func (e *DetachedSegmentEntry) Size() int64
type EncodeChunkData ¶ added in v1.2.0
type EncodeChunkData interface {
    EncodeChunk(b *ChunkDataBuilder, id uint64, offset int64, rec *record.Record, dst []byte, timeSorted bool) ([]byte, error)
    EncodeChunkForCompaction(b *ChunkDataBuilder, offset int64, rec *record.Record, dst []byte, accumulateRowsIndex []int, timeSorted bool) ([]byte, error)
    SetAccumulateRowsIndex(rowsPerSegment []int)
    SetDetachedInfo(writeDetached bool)
}
type EncodeColumnMode ¶ added in v1.2.0
type EncodeColumnMode interface {
// contains filtered or unexported methods
}
type EngineShard ¶ added in v1.3.0
type EngineShard interface {
    IsOpened() bool
    OpenAndEnable(client metaclient.MetaClient) error
    GetDataPath() string
    GetIdent() *meta.ShardIdentifier
}
type EventBus ¶ added in v1.3.0
type EventBus struct {
// contains filtered or unexported fields
}
func DefaultEventBus ¶ added in v1.3.0
func DefaultEventBus() *EventBus
type EventContext ¶ added in v1.3.0
type EventContext struct {
// contains filtered or unexported fields
}
func NewEventContext ¶ added in v1.3.0
func NewEventContext(idx IndexMergeSet, scheduler *scheduler.TaskScheduler, signal chan struct{}) *EventContext
type Events ¶ added in v1.3.0
type Events struct {
// contains filtered or unexported fields
}
func (*Events) Finish ¶ added in v1.3.0
func (es *Events) Finish(success bool, ctx *EventContext)
func (*Events) TriggerNewFile ¶ added in v1.3.0
func (*Events) TriggerReplaceFile ¶ added in v1.3.0
func (*Events) TriggerWriteChunkMeta ¶ added in v1.3.0
func (*Events) TriggerWriteRecord ¶ added in v1.3.0
type ExtraData ¶ added in v1.4.0
type ExtraData struct {
    TimeStoreFlag         uint8
    ChunkMetaCompressFlag uint8
    ChunkMetaHeader       *ChunkMetaHeader
    // contains filtered or unexported fields
}
func (*ExtraData) EnableTimeStore ¶ added in v1.4.0
func (e *ExtraData) EnableTimeStore()
func (*ExtraData) MarshalExtraData ¶ added in v1.4.0
func (*ExtraData) SetChunkMetaCompressFlag ¶ added in v1.4.0
func (*ExtraData) SetChunkMetaHeader ¶ added in v1.4.0
func (e *ExtraData) SetChunkMetaHeader(header *ChunkMetaHeader)
type FileInfoExtend ¶ added in v1.3.0
type FileIterator ¶
type FileIterator struct {
// contains filtered or unexported fields
}
func NewFileIterator ¶
func NewFileIterator(r TSSPFile, log *Log.Logger) *FileIterator
func (*FileIterator) Close ¶
func (itr *FileIterator) Close()
func (*FileIterator) GetCurtChunkMeta ¶ added in v1.0.0
func (itr *FileIterator) GetCurtChunkMeta() *ChunkMeta
func (*FileIterator) NextChunkMeta ¶
func (itr *FileIterator) NextChunkMeta() bool
func (*FileIterator) ReadData ¶ added in v1.2.0
func (itr *FileIterator) ReadData(offset int64, size uint32) ([]byte, error)
func (*FileIterator) WithLog ¶
func (itr *FileIterator) WithLog(log *Log.Logger)
type FileIterators ¶
type FileIterators []*FileIterator
func (FileIterators) AverageRows ¶
func (i FileIterators) AverageRows() int
func (FileIterators) Close ¶
func (i FileIterators) Close()
func (FileIterators) MaxChunkRows ¶
func (i FileIterators) MaxChunkRows() int
func (FileIterators) MaxColumns ¶
func (i FileIterators) MaxColumns() int
type FileReader ¶ added in v1.1.0
type FileReader interface {
    Open() error
    Close() error
    ReadData(cm *ChunkMeta, segment int, dst *record.Record, ctx *ReadContext, ioPriority int) (*record.Record, error)
    Ref()
    Unref() int64
    MetaIndexAt(idx int) (*MetaIndex, error)
    MetaIndex(id uint64, tr util.TimeRange) (int, *MetaIndex, error)
    ChunkMeta(id uint64, offset int64, size, itemCount uint32, metaIdx int, ctx *ChunkMetaContext, ioPriority int) (*ChunkMeta, error)
    ReadMetaBlock(metaIdx int, id uint64, offset int64, size uint32, count uint32, buf *pool.Buffer, ioPriority int) ([]byte, *readcache.CachePage, error)
    ReadDataBlock(offset int64, size uint32, dst *[]byte, ioPriority int) ([]byte, *readcache.CachePage, error)
    Read(offset int64, size uint32, dst *[]byte, ioPriority int) ([]byte, error)
    ReadChunkMetaData(metaIdx int, m *MetaIndex, dst []ChunkMeta, ioPriority int) ([]ChunkMeta, error)
    LoadIdTimes(isOrder bool, p *IdTimePairs) error
    Stat() *Trailer
    MinMaxSeriesID() (min, max uint64, err error)
    MinMaxTime() (min, max int64, err error)
    Contains(id uint64, tm util.TimeRange) bool
    ContainsTime(tm util.TimeRange) bool
    ContainsId(id uint64) bool
    Name() string
    FileName() string
    Rename(newName string) error
    RenameOnObs(obsName string, tmp bool, obsOpt *obs.ObsOptions) error
    FileSize() int64
    InMemSize() int64
    Version() uint64
    FreeMemory()
    FreeFileHandle() error
    LoadIntoMemory() error
    LoadComponents() error
    AverageChunkRows() int
    MaxChunkRows() int
    GetFileReaderRef() int64
    ChunkMetaCompressMode() uint8
}
type FileReaderContext ¶ added in v1.2.0
type FileReaderContext struct {
// contains filtered or unexported fields
}
func NewFileReaderContext ¶ added in v1.2.0
func NewFileReaderContext(tr util.TimeRange, schemas record.Schemas, decs *ReadContext, filterOpts *FilterOptions, filterBitmap *bitmap.FilterBitmap, isOrder bool) *FileReaderContext
func (*FileReaderContext) GetSchemas ¶ added in v1.3.0
func (f *FileReaderContext) GetSchemas() record.Schemas
type FileSwapper ¶ added in v1.2.0
type FileSwapper struct {
// contains filtered or unexported fields
}
func NewFileSwapper ¶ added in v1.2.0
func (*FileSwapper) MustClose ¶ added in v1.2.0
func (s *FileSwapper) MustClose()
func (*FileSwapper) SetWriter ¶ added in v1.2.0
func (s *FileSwapper) SetWriter(w io.WriteCloser)
type FilterOptions ¶
type FilterOptions struct {
// contains filtered or unexported fields
}
func NewFilterOpts ¶
func NewFilterOpts(cond influxql.Expr, filterOption *BaseFilterOptions, tags *influx.PointTags, rowFilters *[]clv.RowFilter) *FilterOptions
func (*FilterOptions) GetCond ¶ added in v1.1.0
func (fo *FilterOptions) GetCond() influxql.Expr
func (*FilterOptions) SetCondFuncs ¶ added in v1.1.0
func (fo *FilterOptions) SetCondFuncs(filterOption *BaseFilterOptions)
type FirstLastReader ¶ added in v1.1.0
type FirstLastReader struct {
// contains filtered or unexported fields
}
func (*FirstLastReader) Init ¶ added in v1.1.0
func (r *FirstLastReader) Init(cm *ChunkMeta, cr ColumnReader, ref *record.Field, dst *record.Record, first bool) *FirstLastReader
func (*FirstLastReader) Read ¶ added in v1.1.0
func (r *FirstLastReader) Read(ctx *ReadContext, copied bool, ioPriority int) error
func (*FirstLastReader) ReadMaxFromPreAgg ¶ added in v1.4.0
func (r *FirstLastReader) ReadMaxFromPreAgg(colMeta *ColumnMeta) (interface{}, int64, bool)
func (*FirstLastReader) ReadMinFromPreAgg ¶ added in v1.4.0
func (r *FirstLastReader) ReadMinFromPreAgg(colMeta *ColumnMeta) (interface{}, int64, bool)
func (*FirstLastReader) Release ¶ added in v1.1.0
func (r *FirstLastReader) Release()
type FloatPreAgg ¶
type FloatPreAgg struct {
// contains filtered or unexported fields
}
FloatPreAgg: if you change the order of the elements in the structure, remember to modify marshal() and unmarshal() as well.
func NewFloatPreAgg ¶
func NewFloatPreAgg() *FloatPreAgg
func (*FloatPreAgg) VLCDecode ¶ added in v1.4.0
func (m *FloatPreAgg) VLCDecode(src []byte) ([]byte, error)
func (*FloatPreAgg) VLCEncode ¶ added in v1.4.0
func (m *FloatPreAgg) VLCEncode(dst []byte) []byte
type FragmentIterator ¶ added in v1.2.0
type FragmentIterator interface {
// contains filtered or unexported methods
}
type FragmentIterators ¶ added in v1.1.1
type FragmentIterators struct {
    SortKeyFileds []record.Field
    TableData
    Conf              *Config
    PkRec             []*record.Record
    RecordResult      *record.Record
    TimeClusterResult *record.Record
    // contains filtered or unexported fields
}
func (*FragmentIterators) Close ¶ added in v1.1.1
func (f *FragmentIterators) Close()
func (*FragmentIterators) CompareWithBreakPoint ¶ added in v1.1.1
func (f *FragmentIterators) CompareWithBreakPoint(curPos, breakPos int) bool
func (*FragmentIterators) IsEmpty ¶ added in v1.2.0
func (f *FragmentIterators) IsEmpty() bool
func (*FragmentIterators) Len ¶ added in v1.1.1
func (f *FragmentIterators) Len() int
func (*FragmentIterators) Less ¶ added in v1.1.1
func (f *FragmentIterators) Less(i, j int) bool
func (*FragmentIterators) Pop ¶ added in v1.1.1
func (f *FragmentIterators) Pop() interface{}
func (*FragmentIterators) Push ¶ added in v1.1.1
func (f *FragmentIterators) Push(v interface{})
func (*FragmentIterators) Swap ¶ added in v1.1.1
func (f *FragmentIterators) Swap(i, j int)
func (*FragmentIterators) WithLog ¶ added in v1.1.1
func (f *FragmentIterators) WithLog(log *Log.Logger)
type FragmentIteratorsPool ¶ added in v1.1.1
type FragmentIteratorsPool struct {
// contains filtered or unexported fields
}
func NewFragmentIteratorsPool ¶ added in v1.1.1
func NewFragmentIteratorsPool(n int) *FragmentIteratorsPool
type GenDDLRespDataFunc ¶ added in v1.4.0
type GenDDLRespDataFunc func() DDLRespData
GenDDLRespDataFunc is a function type that generates DDLRespData.
func GetDDLRespData ¶ added in v1.4.0
func GetDDLRespData(ddl hybridqp.DDLType) GenDDLRespDataFunc
type GenSequenceHandlerFunc ¶ added in v1.4.0
type GenSequenceHandlerFunc func(idx IndexMergeSet, condition influxql.Expr, tr *util.TimeRange, limit int) SequenceIteratorHandler
GenSequenceHandlerFunc is a function type that generates a SequenceIteratorHandler.
func GetDDLDDLSequenceHandler ¶ added in v1.4.0
func GetDDLDDLSequenceHandler(ddl hybridqp.DDLType) GenSequenceHandlerFunc
type HotFileManager ¶ added in v1.4.0
type HotFileManager struct {
// contains filtered or unexported fields
}
func NewHotFileManager ¶ added in v1.4.0
func NewHotFileManager() *HotFileManager
func (*HotFileManager) Add ¶ added in v1.4.0
func (m *HotFileManager) Add(f TSSPFile)
func (*HotFileManager) AddAll ¶ added in v1.4.0
func (m *HotFileManager) AddAll(files []TSSPFile)
func (*HotFileManager) AllocLoadMemory ¶ added in v1.4.0
func (m *HotFileManager) AllocLoadMemory(size int64) bool
func (*HotFileManager) BackgroundFree ¶ added in v1.4.0
func (m *HotFileManager) BackgroundFree()
func (*HotFileManager) Free ¶ added in v1.4.0
func (m *HotFileManager) Free()
func (*HotFileManager) InHotDuration ¶ added in v1.4.0
func (m *HotFileManager) InHotDuration(f TSSPFile) bool
func (*HotFileManager) IncrMemorySize ¶ added in v1.4.0
func (m *HotFileManager) IncrMemorySize(size int64)
func (*HotFileManager) Run ¶ added in v1.4.0
func (m *HotFileManager) Run()
func (*HotFileManager) SetMaxMemorySize ¶ added in v1.4.0
func (m *HotFileManager) SetMaxMemorySize(size int64)
func (*HotFileManager) Stop ¶ added in v1.4.0
func (m *HotFileManager) Stop()
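An illustrative start/stop sketch for HotFileManager; the memory budget is a placeholder value:

    func exampleHotFiles(files []TSSPFile) {
        m := NewHotFileManager()
        m.SetMaxMemorySize(512 << 20) // illustrative 512 MiB budget
        m.Run()                       // start background management
        defer m.Stop()
        m.AddAll(files)               // track these files as hot
    }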
type HotFileReader ¶ added in v1.4.0
type HotFileReader struct {
    fileops.BasicFileReader
    // contains filtered or unexported fields
}
func NewHotFileReader ¶ added in v1.4.0
func NewHotFileReader(r fileops.BasicFileReader, buf []byte) *HotFileReader
func (*HotFileReader) IsMmapRead ¶ added in v1.4.0
func (r *HotFileReader) IsMmapRead() bool
func (*HotFileReader) Release ¶ added in v1.4.0
func (r *HotFileReader) Release()
func (*HotFileReader) Size ¶ added in v1.4.0
func (r *HotFileReader) Size() (int64, error)
type HotFileWriter ¶ added in v1.4.0
type HotFileWriter struct {
    fileops.FileWriter
    // contains filtered or unexported fields
}
func NewHotFileWriter ¶ added in v1.4.0
func NewHotFileWriter(w fileops.FileWriter) *HotFileWriter
func (*HotFileWriter) AppendChunkMetaToData ¶ added in v1.4.0
func (w *HotFileWriter) AppendChunkMetaToData() error
func (*HotFileWriter) BuildHotFileReader ¶ added in v1.4.0
func (w *HotFileWriter) BuildHotFileReader(r fileops.BasicFileReader) *HotFileReader
func (*HotFileWriter) MemSize ¶ added in v1.4.0
func (w *HotFileWriter) MemSize() int
func (*HotFileWriter) Release ¶ added in v1.4.0
func (w *HotFileWriter) Release()
func (*HotFileWriter) WriteChunkMeta ¶ added in v1.4.0
func (w *HotFileWriter) WriteChunkMeta(b []byte) (int, error)
type IdTimePairs ¶
func GetIDTimePairs ¶
func GetIDTimePairs(name string) *IdTimePairs
func (*IdTimePairs) Add ¶
func (p *IdTimePairs) Add(id uint64, tm int64)
func (*IdTimePairs) AddRowCounts ¶
func (p *IdTimePairs) AddRowCounts(rowCounts int64)
func (*IdTimePairs) Len ¶
func (p *IdTimePairs) Len() int
func (*IdTimePairs) Marshal ¶
func (p *IdTimePairs) Marshal(encTimes bool, dst []byte, ctx *encoding.CoderContext) []byte
func (*IdTimePairs) Reset ¶
func (p *IdTimePairs) Reset(name string)
func (*IdTimePairs) UnmarshalBlocks ¶ added in v1.4.0
func (p *IdTimePairs) UnmarshalBlocks(decTimes bool, src []byte, startIdx int, decoder *encoding.CoderContext) (int, int, int, error)
func (*IdTimePairs) UnmarshalHeader ¶ added in v1.4.0
func (p *IdTimePairs) UnmarshalHeader(src []byte) (uint32, error)
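A minimal sketch of accumulating series-id/time pairs; the id and timestamp are placeholders:

    func exampleIdTimePairs() int {
        p := GetIDTimePairs("cpu")      // pooled instance keyed by measurement name
        p.Add(100, 1700000000000000000) // series id, timestamp in nanoseconds
        p.AddRowCounts(1)
        return p.Len()
    }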
type ImmTable ¶ added in v1.1.0
type ImmTable interface {
    GetEngineType() config.EngineType
    GetCompactionType(name string) config.CompactionType
    NewFileIterators(m *MmsTables, group *CompactGroup) (FilesInfo, error)
    AddTSSPFiles(m *MmsTables, name string, isOrder bool, files ...TSSPFile)
    AddBothTSSPFiles(flushed *bool, m *MmsTables, name string, orderFiles []TSSPFile, unorderFiles []TSSPFile)
    LevelPlan(m *MmsTables, level uint16) []*CompactGroup
    SetMstInfo(name string, mstInfo *meta.MeasurementInfo)
    GetMstInfo(name string) (*meta.MeasurementInfo, bool)
    UpdateAccumulateMetaIndexInfo(name string, index *AccumulateMetaIndex)
    FullyCompacted(m *MmsTables) bool
    // contains filtered or unexported methods
}
type IndexCompressWriter ¶ added in v1.2.0
type IndexCompressWriter struct {
// contains filtered or unexported fields
}
func (*IndexCompressWriter) BlockSize ¶ added in v1.2.0
func (w *IndexCompressWriter) BlockSize() int
func (*IndexCompressWriter) Close ¶ added in v1.2.0
func (w *IndexCompressWriter) Close() error
func (*IndexCompressWriter) CopyTo ¶ added in v1.2.0
func (w *IndexCompressWriter) CopyTo(to io.Writer) (int, error)
func (*IndexCompressWriter) GetWriter ¶ added in v1.2.0
func (w *IndexCompressWriter) GetWriter() *bufio.Writer
func (*IndexCompressWriter) Init ¶ added in v1.2.0
func (w *IndexCompressWriter) Init(name string, lock *string, cacheMeta bool, limitCompact bool)
func (*IndexCompressWriter) MetaDataBlocks ¶ added in v1.2.0
func (w *IndexCompressWriter) MetaDataBlocks(dst [][]byte) [][]byte
func (*IndexCompressWriter) Size ¶ added in v1.2.0
func (w *IndexCompressWriter) Size() int
func (*IndexCompressWriter) SwitchMetaBuffer ¶ added in v1.2.0
func (w *IndexCompressWriter) SwitchMetaBuffer() (int, error)
type IndexFrags ¶ added in v1.2.0
type IndexFrags interface {
    BasePath() string
    FragCount() int64
    IndexCount() int
    Indexes() interface{}
    AppendIndexes(...interface{})
    FragRanges() []fragment.FragmentRanges
    AppendFragRanges(...fragment.FragmentRanges)
    AddFragCount(int64)
    SetErr(error)
    GetErr() error
    Size() int
}
func GetDetachedSegmentTask ¶ added in v1.2.0
func GetDetachedSegmentTask(queryID string) (IndexFrags, bool)
type IndexMergeSet ¶ added in v1.3.0
type IndexMergeSet interface {
    GetSeries(sid uint64, buf []byte, condition influxql.Expr, callback func(key *influx.SeriesKey)) error
    GetSeriesBytes(sid uint64, buf []byte, condition influxql.Expr, callback func(key *influx.SeriesBytes)) error
    SearchSeriesKeys(series [][]byte, name []byte, condition influxql.Expr) ([][]byte, error)
}
type IndexWriter ¶ added in v1.2.0
type IndexWriter interface {
    Init(name string, lock *string, cacheMeta bool, limitCompact bool)
    Write(p []byte) (int, error)
    Size() int
    BlockSize() int
    CopyTo(to io.Writer) (int, error)
    SwitchMetaBuffer() (int, error)
    MetaDataBlocks(dst [][]byte) [][]byte
    Close() error
    // contains filtered or unexported methods
}
func NewPKIndexWriter ¶ added in v1.2.0
func NewPKIndexWriter(indexName string, cacheMeta bool, limitCompact bool, lockPath *string) IndexWriter
type IntegerPreAgg ¶
type IntegerPreAgg struct {
// contains filtered or unexported fields
}
IntegerPreAgg: if you change the order of the elements in the structure, remember to modify marshal() and unmarshal() as well.
func NewIntegerPreAgg ¶
func NewIntegerPreAgg() *IntegerPreAgg
func (*IntegerPreAgg) VLCDecode ¶ added in v1.4.0
func (m *IntegerPreAgg) VLCDecode(src []byte) ([]byte, error)
func (*IntegerPreAgg) VLCEncode ¶ added in v1.4.0
func (m *IntegerPreAgg) VLCEncode(dst []byte) []byte
type IteratorByBlock ¶ added in v1.2.0
type IteratorByBlock struct {
// contains filtered or unexported fields
}
IteratorByBlock handles a single measurement (mst).
func NewIteratorByBlock ¶ added in v1.2.0
func NewIteratorByBlock(f *FragmentIterators, conf *Config, group FilesInfo, accumulateMetaIndex *AccumulateMetaIndex) *IteratorByBlock
func (*IteratorByBlock) WriteDetachedMeta ¶ added in v1.2.0
func (ib *IteratorByBlock) WriteDetachedMeta(pkSchema record.Schemas) error
type IteratorByRow ¶ added in v1.2.0
type IteratorByRow struct {
// contains filtered or unexported fields
}
func NewIteratorByRow ¶ added in v1.2.0
func NewIteratorByRow(f *FragmentIterators, conf *Config) *IteratorByRow
func (*IteratorByRow) GetBreakPoint ¶ added in v1.2.0
func (ir *IteratorByRow) GetBreakPoint()
func (*IteratorByRow) NextWithBreakPoint ¶ added in v1.2.0
func (ir *IteratorByRow) NextWithBreakPoint()
type Location ¶
type Location struct {
// contains filtered or unexported fields
}
func NewLocation ¶
func NewLocation(r TSSPFile, ctx *ReadContext) *Location
func (*Location) AscendingDone ¶ added in v1.1.0
func (l *Location) AscendingDone()
func (*Location) DescendingDone ¶ added in v1.1.0
func (l *Location) DescendingDone()
func (*Location) GetChunkMeta ¶
func (*Location) SetChunkMeta ¶ added in v1.2.0
func (*Location) SetClosedSignal ¶ added in v1.3.0
func (*Location) SetFragmentRanges ¶ added in v1.1.0
func (l *Location) SetFragmentRanges(frs []*fragment.FragmentRange)
type LocationCursor ¶
type LocationCursor struct {
// contains filtered or unexported fields
}
func NewLocationCursor ¶
func NewLocationCursor(n int) *LocationCursor
func (*LocationCursor) AddFilterRecPool ¶ added in v1.1.0
func (l *LocationCursor) AddFilterRecPool(pool *record.CircularRecordPool)
func (*LocationCursor) AddLocation ¶
func (l *LocationCursor) AddLocation(loc *Location)
func (*LocationCursor) Close ¶ added in v1.1.0
func (l *LocationCursor) Close()
func (*LocationCursor) FragmentCount ¶ added in v1.1.0
func (l *LocationCursor) FragmentCount() int
func (*LocationCursor) Len ¶
func (l *LocationCursor) Len() int
func (*LocationCursor) Less ¶
func (l *LocationCursor) Less(i, j int) bool
func (*LocationCursor) ReadData ¶
func (l *LocationCursor) ReadData(filterOpts *FilterOptions, dst *record.Record, filterBitmap *bitmap.FilterBitmap, unnestOperator UnnestOperator) (*record.Record, error)
func (*LocationCursor) ReadMeta ¶
func (l *LocationCursor) ReadMeta(filterOpts *FilterOptions, dst *record.Record, filterBitmap *bitmap.FilterBitmap) (*record.Record, error)
func (*LocationCursor) ReadOutOfOrderMeta ¶
func (l *LocationCursor) ReadOutOfOrderMeta(filterOpts *FilterOptions, dst *record.Record) (*record.Record, error)
func (*LocationCursor) Reset ¶ added in v1.3.0
func (l *LocationCursor) Reset()
func (*LocationCursor) Reverse ¶
func (l *LocationCursor) Reverse()
func (*LocationCursor) RowCount ¶ added in v1.1.0
func (l *LocationCursor) RowCount() int
func (*LocationCursor) Swap ¶
func (l *LocationCursor) Swap(i, j int)
type MatchAllOperator ¶ added in v1.3.0
type MatchAllOperator struct {
// contains filtered or unexported fields
}
func (*MatchAllOperator) Compute ¶ added in v1.3.0
func (r *MatchAllOperator) Compute(rec *record.Record)
type MeasurementInProcess ¶ added in v1.2.0
type MeasurementInProcess struct {
// contains filtered or unexported fields
}
func NewMeasurementInProcess ¶ added in v1.2.0
func NewMeasurementInProcess() *MeasurementInProcess
func (*MeasurementInProcess) Add ¶ added in v1.2.0
func (m *MeasurementInProcess) Add(name string) bool
func (*MeasurementInProcess) Del ¶ added in v1.2.0
func (m *MeasurementInProcess) Del(name string)
func (*MeasurementInProcess) Has ¶ added in v1.3.0
func (m *MeasurementInProcess) Has(name string) bool
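Add/Del/Has suggest a per-measurement in-progress guard; a hedged usage sketch with a placeholder measurement name:

    func exampleInProcessGuard(work func()) {
        m := NewMeasurementInProcess()
        if !m.Add("cpu") { // "cpu" is already being processed
            return
        }
        defer m.Del("cpu") // release the guard when done
        work()
    }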
type MergeColPool ¶ added in v1.1.0
type MergeColPool struct {
// contains filtered or unexported fields
}
func (*MergeColPool) Get ¶ added in v1.1.0
func (p *MergeColPool) Get() *record.ColVal
func (*MergeColPool) Put ¶ added in v1.1.0
func (p *MergeColPool) Put(col *record.ColVal)
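A minimal sketch of the column pool, assuming the zero value is ready to use:

    func exampleColPool() {
        var pool MergeColPool // zero value assumed usable
        col := pool.Get()     // *record.ColVal from the pool
        defer pool.Put(col)   // return it for reuse
        _ = col
    }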
type MergeContext ¶ added in v1.3.0
type MergeContext struct {
// contains filtered or unexported fields
}
func BuildMergeContext ¶ added in v1.3.0
func BuildMergeContext(mst string, files *TSSPFiles, full bool, lmt *lastMergeTime) []*MergeContext
func NewMergeContext ¶ added in v1.0.0
func NewMergeContext(mst string, level uint16, mergeSelf bool) *MergeContext
func (*MergeContext) AddUnordered ¶ added in v1.3.0
func (ctx *MergeContext) AddUnordered(f TSSPFile)
func (*MergeContext) Limited ¶ added in v1.3.0
func (ctx *MergeContext) Limited() bool
func (*MergeContext) MergeSelf ¶ added in v1.3.0
func (ctx *MergeContext) MergeSelf() bool
func (*MergeContext) MergeSelfFast ¶ added in v1.3.0
func (ctx *MergeContext) MergeSelfFast() bool
func (*MergeContext) Release ¶ added in v1.3.0
func (ctx *MergeContext) Release()
func (*MergeContext) Sort ¶ added in v1.3.0
func (ctx *MergeContext) Sort()
func (*MergeContext) ToLevel ¶ added in v1.3.0
func (ctx *MergeContext) ToLevel() uint16
func (*MergeContext) UnorderedLen ¶ added in v1.3.0
func (ctx *MergeContext) UnorderedLen() int
func (*MergeContext) UpdateLevel ¶ added in v1.3.0
func (ctx *MergeContext) UpdateLevel(l uint16)
type MergePerformers ¶ added in v1.3.0
type MergePerformers struct {
// contains filtered or unexported fields
}
func NewMergePerformers ¶ added in v1.3.0
func NewMergePerformers(ur *UnorderedReader) *MergePerformers
func (*MergePerformers) Close ¶ added in v1.3.0
func (c *MergePerformers) Close()
func (*MergePerformers) Closed ¶ added in v1.3.0
func (c *MergePerformers) Closed() bool
func (*MergePerformers) Len ¶ added in v1.3.0
func (c *MergePerformers) Len() int
func (*MergePerformers) Less ¶ added in v1.3.0
func (c *MergePerformers) Less(i, j int) bool
func (*MergePerformers) Next ¶ added in v1.3.0
func (c *MergePerformers) Next() error
func (*MergePerformers) Pop ¶ added in v1.3.0
func (c *MergePerformers) Pop() interface{}
func (*MergePerformers) Push ¶ added in v1.3.0
func (c *MergePerformers) Push(v interface{})
func (*MergePerformers) Release ¶ added in v1.3.0
func (c *MergePerformers) Release()
func (*MergePerformers) Swap ¶ added in v1.3.0
func (c *MergePerformers) Swap(i, j int)
type MergeSelf ¶ added in v1.3.0
type MergeSelf struct {
// contains filtered or unexported fields
}
func (*MergeSelf) InitEvents ¶ added in v1.3.0
func (m *MergeSelf) InitEvents(ctx *MergeContext) *Events
type MergeSelfParquetEvent ¶ added in v1.3.0
type MergeSelfParquetEvent struct {
TSSP2ParquetEvent
}
func (*MergeSelfParquetEvent) Instance ¶ added in v1.3.0
func (e *MergeSelfParquetEvent) Instance() Event
func (*MergeSelfParquetEvent) OnWriteRecord ¶ added in v1.3.0
func (e *MergeSelfParquetEvent) OnWriteRecord(rec *record.Record)
type MetaControl ¶ added in v1.3.0
type MetaControl interface {
    Push(MetaDataInfo)
    Pop() (MetaDataInfo, bool)
    IsEmpty() bool
}
func NewMetaControl ¶ added in v1.3.0
func NewMetaControl(isQueue bool, count int) MetaControl
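Given the MetaQueue and MetaStack implementations below, the isQueue flag presumably selects FIFO versus LIFO order; a hedged sketch:

    func exampleMetaControl(infos []MetaDataInfo) {
        q := NewMetaControl(true, len(infos)) // true: queue (FIFO) semantics, assumed
        for _, mi := range infos {
            q.Push(mi)
        }
        for !q.IsEmpty() {
            if mi, ok := q.Pop(); ok {
                _ = mi // process the meta data info
            }
        }
    }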
type MetaData ¶ added in v1.3.0
type MetaData struct {
// contains filtered or unexported fields
}
func NewMetaData ¶ added in v1.3.0
func (*MetaData) GetBlockIndex ¶ added in v1.3.0
func (*MetaData) GetContentBlockLength ¶ added in v1.3.0
func (*MetaData) GetContentBlockOffset ¶ added in v1.3.0
func (*MetaData) GetMaxTime ¶ added in v1.3.0
func (*MetaData) GetMinTime ¶ added in v1.3.0
type MetaDataInfo ¶ added in v1.3.0
type MetaIndex ¶
type MetaIndex struct {
// contains filtered or unexported fields
}
MetaIndex: if you change the order of the elements in the structure, remember to modify marshal() and unmarshal() as well.
func GetMetaIndexAndBlockId ¶ added in v1.3.0
type MetaQueue ¶ added in v1.3.0
type MetaQueue []MetaDataInfo
func (*MetaQueue) Pop ¶ added in v1.3.0
func (q *MetaQueue) Pop() (MetaDataInfo, bool)
func (*MetaQueue) Push ¶ added in v1.3.0
func (q *MetaQueue) Push(v MetaDataInfo)
type MetaStack ¶ added in v1.3.0
type MetaStack []MetaDataInfo
func (*MetaStack) Pop ¶ added in v1.3.0
func (s *MetaStack) Pop() (MetaDataInfo, bool)
func (*MetaStack) Push ¶ added in v1.3.0
func (s *MetaStack) Push(value MetaDataInfo)
type MmsIdTime ¶
type MmsIdTime struct {
// contains filtered or unexported fields
}
func NewMmsIdTime ¶
func NewMmsIdTime(sc *SeriesCounter) *MmsIdTime
type MmsReaders ¶
type MmsReaders struct {
    Orders      TableReaders
    OutOfOrders TableReaders
}
type MmsTables ¶
type MmsTables struct {
    Order      map[string]*TSSPFiles        // {"cpu_0001": *TSSPFiles}
    OutOfOrder map[string]*TSSPFiles        // {"cpu_0001": *TSSPFiles}
    CSFiles    map[string]*TSSPFiles        // {"cpu_0001": *TSSPFiles} tsspFiles for columnStore
    PKFiles    map[string]*colstore.PKFiles // {"cpu_0001": *PKFiles} PKFiles for columnStore
    ImmTable   ImmTable
    Conf       *Config
    // contains filtered or unexported fields
}
func NewTableStore ¶
func (*MmsTables) AddBothTSSPFiles ¶ added in v1.2.0
func (*MmsTables) AddRowCountsBySid ¶
func (*MmsTables) AddTSSPFiles ¶
Currently not used by tsEngine.
func (*MmsTables) CompactDone ¶
func (*MmsTables) CompactionDisable ¶
func (m *MmsTables) CompactionDisable()
func (*MmsTables) CompactionEnable ¶
func (m *MmsTables) CompactionEnable()
func (*MmsTables) CompactionEnabled ¶
func (*MmsTables) CopyCSFiles ¶ added in v1.3.0
func (*MmsTables) DisableCompAndMerge ¶ added in v1.0.0
func (m *MmsTables) DisableCompAndMerge()
func (*MmsTables) DropMeasurement ¶
func (*MmsTables) EnableCompAndMerge ¶ added in v1.0.0
func (m *MmsTables) EnableCompAndMerge()
func (*MmsTables) FreeAllMemReader ¶
func (m *MmsTables) FreeAllMemReader()
func (*MmsTables) FreeSequencer ¶ added in v1.0.0
func (*MmsTables) FullCompact ¶
func (*MmsTables) FullyCompacted ¶ added in v1.2.0
func (*MmsTables) GetAllMstList ¶ added in v1.3.1
func (*MmsTables) GetBothFilesRef ¶ added in v1.0.0
func (*MmsTables) GetCSFiles ¶ added in v1.1.0
func (*MmsTables) GetFileSeq ¶ added in v1.0.0
func (*MmsTables) GetMstFileStat ¶
func (m *MmsTables) GetMstFileStat() *statistics.FileStat
func (*MmsTables) GetMstInfo ¶ added in v1.2.0
func (m *MmsTables) GetMstInfo(name string) (*meta.MeasurementInfo, bool)
func (*MmsTables) GetObsOption ¶ added in v1.3.0
func (m *MmsTables) GetObsOption() *obs.ObsOptions
func (*MmsTables) GetOutOfOrderFileNum ¶
func (*MmsTables) GetRowCountsBySid ¶
func (*MmsTables) GetShardID ¶ added in v1.3.0
func (*MmsTables) GetTSSPFiles ¶ added in v1.0.0
func (*MmsTables) GetTableFileNum ¶ added in v1.3.0
func (*MmsTables) IsOutOfOrderFilesExist ¶ added in v1.0.0
func (*MmsTables) Listen ¶ added in v1.0.0
func (m *MmsTables) Listen(signal chan struct{}, onClose func())
func (*MmsTables) LoadSequencer ¶ added in v1.3.0
func (m *MmsTables) LoadSequencer()
func (*MmsTables) MergeDisable ¶
func (m *MmsTables) MergeDisable()
func (*MmsTables) MergeEnable ¶
func (m *MmsTables) MergeEnable()
func (*MmsTables) MergeEnabled ¶
func (*MmsTables) MergeOutOfOrder ¶
func (*MmsTables) NewChunkIterators ¶
func (m *MmsTables) NewChunkIterators(group FilesInfo) *ChunkIterators
func (*MmsTables) NewStreamIterators ¶
func (m *MmsTables) NewStreamIterators(group FilesInfo) *StreamIterators
func (*MmsTables) NewStreamWriteFile ¶ added in v1.0.0
func (m *MmsTables) NewStreamWriteFile(mst string) *StreamWriteFile
func (*MmsTables) NextSequence ¶
func (*MmsTables) ReloadSequencer ¶ added in v1.1.0
func (*MmsTables) RenameFileToLevel ¶ added in v1.3.0
func (m *MmsTables) RenameFileToLevel(plan *CompactGroup) error
func (*MmsTables) ReplaceDownSampleFiles ¶ added in v1.0.0
func (*MmsTables) ReplaceFiles ¶
func (*MmsTables) ReplacePKFile ¶ added in v1.1.1
func (*MmsTables) SeriesTotal ¶ added in v1.1.0
func (*MmsTables) SetAccumulateMetaIndex ¶ added in v1.2.0
func (m *MmsTables) SetAccumulateMetaIndex(name string, aMetaIndex *AccumulateMetaIndex)
func (*MmsTables) SetAddFunc ¶ added in v1.0.0
func (*MmsTables) SetImmTableType ¶ added in v1.1.0
func (m *MmsTables) SetImmTableType(engineType config.EngineType)
func (*MmsTables) SetIndexMergeSet ¶ added in v1.3.0
func (m *MmsTables) SetIndexMergeSet(idx IndexMergeSet)
func (*MmsTables) SetLockPath ¶ added in v1.2.0
func (*MmsTables) SetMstInfo ¶ added in v1.1.1
func (m *MmsTables) SetMstInfo(name string, mstInfo *meta.MeasurementInfo)
func (*MmsTables) SetObsOption ¶ added in v1.3.0
func (m *MmsTables) SetObsOption(option *obs.ObsOptions)
type MsBuilder ¶
type MsBuilder struct {
    Path string
    TableData
    Conf               *Config
    MaxIds             int
    RowCount           int64
    ShardID            uint64
    Files              []TSSPFile
    FilesInfo          []FileInfoExtend
    FileName           TSSPFileName
    EncodeChunkDataImp EncodeChunkData
    // contains filtered or unexported fields
}
func NewDetachedMsBuilder ¶ added in v1.2.0
func NewMsBuilder ¶ added in v1.1.0
func NewMsBuilder(dir, name string, lockPath *string, conf *Config, idCount int, fileName TSSPFileName, tier uint64, sequencer *Sequencer, estimateSize int, engineType config.EngineType, obsOpt *obs.ObsOptions, shardID uint64) *MsBuilder
func (*MsBuilder) BloomFilterNeedDetached ¶ added in v1.2.0
func (*MsBuilder) CloseIndexWriters ¶ added in v1.3.0
func (*MsBuilder) FileVersion ¶
func (*MsBuilder) GetChunkBuilder ¶ added in v1.2.0
func (b *MsBuilder) GetChunkBuilder() *ChunkDataBuilder
func (*MsBuilder) GetFullTextIdx ¶ added in v1.2.0
func (*MsBuilder) GetIndexBuilder ¶ added in v1.3.0
func (b *MsBuilder) GetIndexBuilder() *index.IndexWriterBuilder
func (*MsBuilder) GetLocalBfCount ¶ added in v1.2.0
func (*MsBuilder) GetPKInfoNum ¶ added in v1.1.0
func (*MsBuilder) GetPKMark ¶ added in v1.1.0
func (b *MsBuilder) GetPKMark(i int) fragment.IndexFragment
func (*MsBuilder) GetPKRecord ¶ added in v1.1.0
func (*MsBuilder) MaxRowsPerSegment ¶
func (*MsBuilder) NewIndexWriterBuilder ¶ added in v1.3.0
func (b *MsBuilder) NewIndexWriterBuilder(schema record.Schemas, indexRelation influxql.IndexRelation)
func (*MsBuilder) NewPKIndexWriter ¶ added in v1.1.0
func (b *MsBuilder) NewPKIndexWriter()
func (*MsBuilder) SetEncodeChunkDataImp ¶ added in v1.2.0
func (b *MsBuilder) SetEncodeChunkDataImp(engineType config.EngineType)
func (*MsBuilder) SetFullTextIdx ¶ added in v1.2.0
func (*MsBuilder) SetLocalBfCount ¶ added in v1.2.0
func (*MsBuilder) SetTCLocation ¶ added in v1.1.1
func (*MsBuilder) SetTimeSorted ¶ added in v1.2.0
func (*MsBuilder) StoreTimes ¶ added in v1.1.0
func (b *MsBuilder) StoreTimes()
func (*MsBuilder) SwitchChunkMeta ¶ added in v1.2.0
func (*MsBuilder) WriteChunkMeta ¶ added in v1.2.0
func (*MsBuilder) WriteDetached ¶ added in v1.2.0
func (*MsBuilder) WriteDetachedIndex ¶ added in v1.3.0
func (*MsBuilder) WriteDetachedMetaAndIndex ¶ added in v1.2.0
func (*MsBuilder) WriteRecord ¶
type PageCacheReader ¶ added in v1.2.0
type PageCacheReader struct {
// contains filtered or unexported fields
}
func NewPageCacheReader ¶ added in v1.2.0
func NewPageCacheReader(t *Trailer, r *tsspFileReader) *PageCacheReader
func (*PageCacheReader) GetCachePageIdsAndOffsets ¶ added in v1.2.0
func (pcr *PageCacheReader) GetCachePageIdsAndOffsets(start int64, size uint32) ([]int64, []int64, error)
Get all cache page IDs containing bytes from start to start + size.
func (*PageCacheReader) GetMaxPageIdAndOffset ¶ added in v1.2.0
func (pcr *PageCacheReader) GetMaxPageIdAndOffset() (int64, int64)
func (*PageCacheReader) Init ¶ added in v1.2.0
func (pcr *PageCacheReader) Init()
func (*PageCacheReader) ReadFixPageSize ¶ added in v1.2.0
func (pcr *PageCacheReader) ReadFixPageSize(offset int64, size uint32, buf *[]byte, ioPriority int) ([]byte, *readcache.CachePage, error)
Read file bytes in fixed-size pages.
func (*PageCacheReader) ReadSinglePage ¶ added in v1.2.0
type ParquetTask ¶ added in v1.3.0
func (*ParquetTask) Execute ¶ added in v1.3.0
func (t *ParquetTask) Execute()
func (*ParquetTask) GetSeries ¶ added in v1.3.0
func (t *ParquetTask) GetSeries(sId uint64) (map[string]string, error)
func (*ParquetTask) GetTagKeys ¶ added in v1.4.0
func (t *ParquetTask) GetTagKeys() (map[string]struct{}, error)
func (*ParquetTask) LockFiles ¶ added in v1.4.0
func (t *ParquetTask) LockFiles()
func (*ParquetTask) RemoveLog ¶ added in v1.4.0
func (t *ParquetTask) RemoveLog()
func (*ParquetTask) Stop ¶ added in v1.4.0
func (t *ParquetTask) Stop()
func (*ParquetTask) UnLockFiles ¶ added in v1.4.0
func (t *ParquetTask) UnLockFiles()
type PreAggBuilder ¶
type PreAggBuilder interface {
// contains filtered or unexported methods
}
type PreAggBuilders ¶
type PreAggBuilders struct {
// contains filtered or unexported fields
}
func (*PreAggBuilders) FloatBuilder ¶ added in v1.2.0
func (b *PreAggBuilders) FloatBuilder() *FloatPreAgg
func (*PreAggBuilders) IntegerBuilder ¶ added in v1.2.0
func (b *PreAggBuilders) IntegerBuilder() *IntegerPreAgg
func (*PreAggBuilders) Release ¶
func (b *PreAggBuilders) Release()
type QueryfileCache ¶ added in v1.1.0
type QueryfileCache struct {
// contains filtered or unexported fields
}
func GetQueryfileCache ¶ added in v1.1.0
func GetQueryfileCache() *QueryfileCache
func NewQueryfileCache ¶ added in v1.1.0
func NewQueryfileCache(cap uint32) *QueryfileCache
func (*QueryfileCache) Get ¶ added in v1.1.0
func (qfc *QueryfileCache) Get()
func (*QueryfileCache) GetCap ¶ added in v1.1.0
func (qfc *QueryfileCache) GetCap() uint32
func (*QueryfileCache) Put ¶ added in v1.1.0
func (qfc *QueryfileCache) Put(f TSSPFile)
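A minimal sketch of the query file cache; the capacity is illustrative:

    func exampleQueryFileCache(f TSSPFile) uint32 {
        qfc := NewQueryfileCache(128) // illustrative capacity
        qfc.Put(f)
        return qfc.GetCap() // 128
    }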
type ReadContext ¶
type ReadContext struct {
    Ascending bool
    // contains filtered or unexported fields
}
func NewReadContext ¶
func NewReadContext(ascending bool) *ReadContext
func (*ReadContext) GetCoder ¶ added in v1.1.0
func (d *ReadContext) GetCoder() *encoding.CoderContext
func (*ReadContext) GetOps ¶
func (d *ReadContext) GetOps() []*comm.CallOption
func (*ReadContext) GetReadBuff ¶ added in v1.1.0
func (d *ReadContext) GetReadBuff() []byte
func (*ReadContext) InitPreAggBuilder ¶
func (d *ReadContext) InitPreAggBuilder()
func (*ReadContext) IsAborted ¶ added in v1.3.0
func (d *ReadContext) IsAborted() bool
func (*ReadContext) MatchPreAgg ¶
func (d *ReadContext) MatchPreAgg() bool
func (*ReadContext) Release ¶
func (d *ReadContext) Release()
func (*ReadContext) Reset ¶
func (d *ReadContext) Reset()
func (*ReadContext) Set ¶
func (d *ReadContext) Set(ascending bool, tr util.TimeRange, onlyFirstOrLast bool, ops []*comm.CallOption)
func (*ReadContext) SetClosedSignal ¶ added in v1.2.0
func (d *ReadContext) SetClosedSignal(s *bool)
func (*ReadContext) SetOps ¶
func (d *ReadContext) SetOps(c []*comm.CallOption)
func (*ReadContext) SetSpan ¶ added in v1.1.0
func (d *ReadContext) SetSpan(readSpan, filterSpan *tracing.Span)
func (*ReadContext) SetTr ¶
func (d *ReadContext) SetTr(tr util.TimeRange)
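A minimal sketch of preparing a ReadContext for an ascending, time-bounded read; tr is supplied by the caller:

    func exampleReadContext(tr util.TimeRange) {
        d := NewReadContext(true) // ascending scan
        defer d.Release()
        d.SetTr(tr)         // restrict reads to the time range
        _ = d.MatchPreAgg() // whether pre-aggregation applies
    }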
type Segment ¶
type Segment struct {
// contains filtered or unexported fields
}
Segment offset/size/minT/maxT
type SegmentMeta ¶ added in v1.2.0
type SegmentMeta struct {
// contains filtered or unexported fields
}
func NewSegmentMeta ¶ added in v1.2.0
func NewSegmentMeta(id int, c *ChunkMeta) *SegmentMeta
func (*SegmentMeta) GetMaxTime ¶ added in v1.2.0
func (s *SegmentMeta) GetMaxTime() int64
func (*SegmentMeta) GetMinTime ¶ added in v1.2.0
func (s *SegmentMeta) GetMinTime() int64
type SegmentRange ¶
type SegmentRange [2]int64 // min/max
type SegmentReader ¶ added in v1.0.0
type SegmentReader struct {
// contains filtered or unexported fields
}
func NewSegmentReader ¶ added in v1.0.0
func NewSegmentReader(fi *FileIterator) *SegmentReader
type SegmentSequenceReader ¶ added in v1.3.0
type SegmentSequenceReader struct {
// contains filtered or unexported fields
}
func NewSegmentSequenceReader ¶ added in v1.3.0
func NewSegmentSequenceReader(path *sparseindex.OBSFilterPath, taskID int, count uint64, consumeInfo *consume.ConsumeInfo, schema record.Schemas, filterOpt *FilterOptions) (*SegmentSequenceReader, error)
func (*SegmentSequenceReader) Close ¶ added in v1.3.0
func (reader *SegmentSequenceReader) Close()
func (*SegmentSequenceReader) ConsumeDateByShard ¶ added in v1.3.0
type SegmentTask ¶ added in v1.2.0
type SegmentTask struct {
// contains filtered or unexported fields
}
type SequenceIterator ¶ added in v1.3.0
type SequenceIterator interface {
    SetChunkMetasReader(reader SequenceIteratorChunkMetaReader)
    Release()
    AddFiles(files []TSSPFile)
    Stop()
    Run() error
    Buffer() *pool.Buffer
}
func NewSequenceIterator ¶ added in v1.3.0
func NewSequenceIterator(handler SequenceIteratorHandler, logger *Log.Logger) SequenceIterator
type SequenceIteratorChunkMetaReader ¶ added in v1.3.0
type SequenceIteratorHandler ¶ added in v1.3.0
type SequenceIteratorHandler interface {
    Init(map[string]interface{}) error
    Begin()
    NextFile(TSSPFile)
    NextChunkMeta(cm *ChunkMeta) error
    Limited() bool
    Finish()
}
func NewSeriesKeysIteratorHandler ¶ added in v1.4.0
func NewSeriesKeysIteratorHandler(idx IndexMergeSet, condition influxql.Expr, tr *util.TimeRange, limit int) SequenceIteratorHandler
func NewTagValuesIteratorHandler ¶ added in v1.3.0
func NewTagValuesIteratorHandler(idx IndexMergeSet, condition influxql.Expr, tr *util.TimeRange, limit int) SequenceIteratorHandler
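A hedged sketch wiring a tag-values handler into a SequenceIterator; the nil condition, zero-value time range, and zero limit are assumptions about permissive defaults:

    func exampleSequenceScan(idx IndexMergeSet, files []TSSPFile, logger *Log.Logger) error {
        var tr util.TimeRange // zero-value placeholder range
        handler := NewTagValuesIteratorHandler(idx, nil, &tr, 0)
        itr := NewSequenceIterator(handler, logger)
        defer itr.Release()
        itr.AddFiles(files)
        return itr.Run()
    }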
type Sequencer ¶
type Sequencer struct {
// contains filtered or unexported fields
}
func NewSequencer ¶
func NewSequencer() *Sequencer
func (*Sequencer) AddRowCounts ¶
func (*Sequencer) BatchUpdateCheckTime ¶
func (s *Sequencer) BatchUpdateCheckTime(p *IdTimePairs, incrRows bool)
func (*Sequencer) DelMmsIdTime ¶ added in v1.1.0
func (*Sequencer) GetMmsIdTime ¶ added in v1.3.0
func (*Sequencer) ResetMmsIdTime ¶ added in v1.1.0
func (s *Sequencer) ResetMmsIdTime()
func (*Sequencer) SeriesTotal ¶ added in v1.1.0
func (*Sequencer) SetToInLoading ¶ added in v1.1.0
type SeriesCounter ¶ added in v1.1.0
type SeriesCounter struct {
// contains filtered or unexported fields
}
func (*SeriesCounter) DecrN ¶ added in v1.1.0
func (sc *SeriesCounter) DecrN(n uint64)
func (*SeriesCounter) Get ¶ added in v1.1.0
func (sc *SeriesCounter) Get() uint64
func (*SeriesCounter) Incr ¶ added in v1.1.0
func (sc *SeriesCounter) Incr()
func (*SeriesCounter) Reset ¶ added in v1.1.0
func (sc *SeriesCounter) Reset()
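A minimal sketch of the series counter, assuming the zero value is usable:

    func exampleSeriesCounter() uint64 {
        var sc SeriesCounter // zero value assumed usable
        sc.Incr()
        sc.Incr()
        sc.DecrN(1)
        return sc.Get() // 1
    }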
type SeriesKeys ¶ added in v1.4.0
type SeriesKeys struct {
// contains filtered or unexported fields
}
func (*SeriesKeys) Add ¶ added in v1.4.0
func (s *SeriesKeys) Add(key, _ string)
func (*SeriesKeys) Count ¶ added in v1.4.0
func (s *SeriesKeys) Count() int
func (*SeriesKeys) ForEach ¶ added in v1.4.0
func (s *SeriesKeys) ForEach(process func(key, _ string))
type SeriesKeysIteratorHandler ¶ added in v1.4.0
type SeriesKeysIteratorHandler struct {
// contains filtered or unexported fields
}
func (*SeriesKeysIteratorHandler) Begin ¶ added in v1.4.0
func (h *SeriesKeysIteratorHandler) Begin()
func (*SeriesKeysIteratorHandler) Finish ¶ added in v1.4.0
func (h *SeriesKeysIteratorHandler) Finish()
func (*SeriesKeysIteratorHandler) Init ¶ added in v1.4.0
func (h *SeriesKeysIteratorHandler) Init(param map[string]interface{}) error
func (*SeriesKeysIteratorHandler) Limited ¶ added in v1.4.0
func (h *SeriesKeysIteratorHandler) Limited() bool
func (*SeriesKeysIteratorHandler) NextChunkMeta ¶ added in v1.4.0
func (h *SeriesKeysIteratorHandler) NextChunkMeta(cm *ChunkMeta) error
func (*SeriesKeysIteratorHandler) NextFile ¶ added in v1.4.0
func (h *SeriesKeysIteratorHandler) NextFile(TSSPFile)
type SortKeyIterator ¶ added in v1.1.1
type SortKeyIterator struct {
    *FileIterator
    // contains filtered or unexported fields
}
func NewSortKeyIterator ¶ added in v1.1.1
func NewSortKeyIterator(fi *FileIterator, sortKeyFields []record.Field, ctx *ReadContext, schema record.Schemas, tcDuration time.Duration, compactWithBlock bool, fileIdx int) (*SortKeyIterator, error)
func (*SortKeyIterator) GetNewRecord ¶ added in v1.1.1
func (s *SortKeyIterator) GetNewRecord(tcDuration time.Duration, compactWithBlock bool) error
func (*SortKeyIterator) NextSingleFragment ¶ added in v1.1.1
func (s *SortKeyIterator) NextSingleFragment(tbStore *MmsTables, impl *IteratorByRow, pkSchema record.Schemas) (*record.Record, error)
type SortLimitCursor ¶ added in v1.3.0
type SortLimitCursor struct {
// contains filtered or unexported fields
}
func NewSortLimitCursor ¶ added in v1.3.0
func NewSortLimitCursor(options hybridqp.Options, schemas record.Schemas, input comm.TimeCutKeyCursor, shardId int64) *SortLimitCursor
func (*SortLimitCursor) Close ¶ added in v1.3.0
func (t *SortLimitCursor) Close() error
func (*SortLimitCursor) EndSpan ¶ added in v1.3.0
func (t *SortLimitCursor) EndSpan()
func (*SortLimitCursor) GetInput ¶ added in v1.3.0
func (t *SortLimitCursor) GetInput() comm.TimeCutKeyCursor
func (*SortLimitCursor) GetSchema ¶ added in v1.3.0
func (t *SortLimitCursor) GetSchema() record.Schemas
func (*SortLimitCursor) Name ¶ added in v1.3.0
func (t *SortLimitCursor) Name() string
func (*SortLimitCursor) Next ¶ added in v1.3.0
func (t *SortLimitCursor) Next() (*record.Record, comm.SeriesInfoIntf, error)
func (*SortLimitCursor) NextAggData ¶ added in v1.3.0
func (*SortLimitCursor) SetOps ¶ added in v1.3.0
func (t *SortLimitCursor) SetOps(ops []*comm.CallOption)
func (*SortLimitCursor) SinkPlan ¶ added in v1.3.0
func (t *SortLimitCursor) SinkPlan(plan hybridqp.QueryNode)
func (*SortLimitCursor) StartSpan ¶ added in v1.3.0
func (t *SortLimitCursor) StartSpan(span *tracing.Span)
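SortLimitCursor follows the cursor contract shared by the readers in this package: configure with SetOps/SinkPlan, then call Next until drained. A hedged consumption loop, assuming (as is conventional for these cursors) that a nil record signals exhaustion:

    cur := NewSortLimitCursor(opts, schemas, input, shardID) // arguments obtained elsewhere
    defer cur.Close()
    for {
        rec, _, err := cur.Next()
        if err != nil {
            return err
        }
        if rec == nil {
            break // assumption: a nil record means the cursor is drained
        }
        // process rec (*record.Record)
    }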
type SortLimitRows ¶ added in v1.3.0
type SortLimitRows struct {
// contains filtered or unexported fields
}
func NewSortLimitRows ¶ added in v1.3.0
func NewSortLimitRows(sortIndex []int, schema record.Schemas, shardId int64) *SortLimitRows
func (*SortLimitRows) Len ¶ added in v1.3.0
func (rs *SortLimitRows) Len() int
func (*SortLimitRows) Less ¶ added in v1.3.0
func (rs *SortLimitRows) Less(i, j int) bool
func (*SortLimitRows) Pop ¶ added in v1.3.0
func (rs *SortLimitRows) Pop() interface{}
func (*SortLimitRows) PopToRec ¶ added in v1.3.0
func (rs *SortLimitRows) PopToRec() *record.Record
func (*SortLimitRows) Push ¶ added in v1.3.0
func (rs *SortLimitRows) Push(x interface{})
func (*SortLimitRows) Swap ¶ added in v1.3.0
func (rs *SortLimitRows) Swap(i, j int)
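Len, Less, Swap, Push and Pop make *SortLimitRows satisfy container/heap's heap.Interface, and PopToRec drains the heap into a record. A sketch of the wiring only, since the element type accepted by Push is unexported:

    rows := NewSortLimitRows(sortIndex, schema, shardID)
    heap.Init(rows)         // *SortLimitRows is a valid heap.Interface
    // heap.Push(rows, row) // row construction is internal to the package
    rec := rows.PopToRec()  // drain into a *record.Record
    _ = rec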
type StreamCompactParquetEvent ¶ added in v1.3.0
type StreamCompactParquetEvent struct {
TSSP2ParquetEvent
}
func (*StreamCompactParquetEvent) Instance ¶ added in v1.3.0
func (e *StreamCompactParquetEvent) Instance() Event
func (*StreamCompactParquetEvent) OnWriteChunkMeta ¶ added in v1.3.0
func (e *StreamCompactParquetEvent) OnWriteChunkMeta(cm *ChunkMeta)
type StreamIterator ¶
type StreamIterator struct {
    *FileIterator
    // contains filtered or unexported fields
}
func NewStreamStreamIterator ¶
func NewStreamStreamIterator(fi *FileIterator) *StreamIterator
type StreamIterators ¶
func (*StreamIterators) Close ¶
func (c *StreamIterators) Close()
func (*StreamIterators) FileVersion ¶
func (c *StreamIterators) FileVersion() uint64
func (*StreamIterators) Flush ¶
func (c *StreamIterators) Flush() error
func (*StreamIterators) Init ¶
func (c *StreamIterators) Init(id uint64, chunkDataOffset int64, schema record.Schemas)
func (*StreamIterators) InitEvents ¶ added in v1.3.0
func (c *StreamIterators) InitEvents(level uint16) *Events
func (*StreamIterators) Len ¶
func (c *StreamIterators) Len() int
func (*StreamIterators) Less ¶
func (c *StreamIterators) Less(i, j int) bool
func (*StreamIterators) ListenCloseSignal ¶ added in v1.2.0
func (c *StreamIterators) ListenCloseSignal(finish chan struct{})
func (*StreamIterators) NewFile ¶
func (c *StreamIterators) NewFile(addFileExt bool) error
func (*StreamIterators) NewTSSPFile ¶
func (c *StreamIterators) NewTSSPFile(tmp bool) (TSSPFile, error)
func (*StreamIterators) Pop ¶
func (c *StreamIterators) Pop() interface{}
func (*StreamIterators) Push ¶
func (c *StreamIterators) Push(v interface{})
func (*StreamIterators) RemoveTmpFiles ¶ added in v1.3.0
func (c *StreamIterators) RemoveTmpFiles()
func (*StreamIterators) SetWriter ¶ added in v1.2.0
func (c *StreamIterators) SetWriter(w fileops.FileWriter)
func (*StreamIterators) Size ¶
func (c *StreamIterators) Size() int64
func (*StreamIterators) Swap ¶
func (c *StreamIterators) Swap(i, j int)
func (*StreamIterators) SwitchChunkMeta ¶ added in v1.2.0
func (c *StreamIterators) SwitchChunkMeta() error
func (*StreamIterators) WithLog ¶
func (c *StreamIterators) WithLog(log *Log.Logger)
func (*StreamIterators) WriteChunkMeta ¶ added in v1.2.0
func (c *StreamIterators) WriteChunkMeta(cm *ChunkMeta) (int, error)
type StreamIteratorsPool ¶
type StreamIteratorsPool struct {
// contains filtered or unexported fields
}
func NewStreamIteratorsPool ¶
func NewStreamIteratorsPool(n int) *StreamIteratorsPool
type StreamWriteFile ¶ added in v1.0.0
func NewWriteScanFile ¶ added in v1.0.0
func (*StreamWriteFile) AppendColumn ¶ added in v1.0.0
func (c *StreamWriteFile) AppendColumn(ref *record.Field) error
func (*StreamWriteFile) ChangeColumn ¶ added in v1.0.0
func (c *StreamWriteFile) ChangeColumn(ref record.Field) error
func (*StreamWriteFile) ChangeSid ¶ added in v1.0.0
func (c *StreamWriteFile) ChangeSid(sid uint64)
func (*StreamWriteFile) Close ¶ added in v1.0.0
func (c *StreamWriteFile) Close(isError bool)
func (*StreamWriteFile) Flush ¶ added in v1.0.0
func (c *StreamWriteFile) Flush() error
func (*StreamWriteFile) GetTSSPFile ¶ added in v1.0.0
func (c *StreamWriteFile) GetTSSPFile() TSSPFile
func (*StreamWriteFile) Init ¶ added in v1.0.0
func (c *StreamWriteFile) Init(id uint64, chunkDataOffset int64, schema record.Schemas)
func (*StreamWriteFile) InitFile ¶ added in v1.0.0
func (c *StreamWriteFile) InitFile(seq uint64) error
func (*StreamWriteFile) InitMergedFile ¶ added in v1.0.0
func (c *StreamWriteFile) InitMergedFile(f TSSPFile) error
func (*StreamWriteFile) NewFile ¶ added in v1.0.0
func (c *StreamWriteFile) NewFile(addFileExt bool) error
func (*StreamWriteFile) NewTSSPFile ¶ added in v1.0.0
func (c *StreamWriteFile) NewTSSPFile(tmp bool) (TSSPFile, error)
func (*StreamWriteFile) SetValidate ¶ added in v1.1.0
func (c *StreamWriteFile) SetValidate(en bool)
func (*StreamWriteFile) Size ¶ added in v1.0.0
func (c *StreamWriteFile) Size() int64
func (*StreamWriteFile) SortColumns ¶ added in v1.2.0
func (c *StreamWriteFile) SortColumns()
func (*StreamWriteFile) SwitchChunkMeta ¶ added in v1.2.0
func (c *StreamWriteFile) SwitchChunkMeta() error
func (*StreamWriteFile) WriteChunkMeta ¶ added in v1.2.0
func (c *StreamWriteFile) WriteChunkMeta(cm *ChunkMeta) (int, error)
func (*StreamWriteFile) WriteCurrentMeta ¶ added in v1.0.0
func (c *StreamWriteFile) WriteCurrentMeta() error
func (*StreamWriteFile) WriteFile ¶ added in v1.0.0
func (c *StreamWriteFile) WriteFile() error
func (*StreamWriteFile) WriteMeta ¶ added in v1.0.0
func (c *StreamWriteFile) WriteMeta(cm *ChunkMeta) error
type StringPreAgg ¶
type StringPreAgg struct {
// contains filtered or unexported fields
}
StringPreAgg: if you change the order of the fields in the structure, remember to modify marshal() and unmarshal() to match (see the sketch below).
func NewStringPreAgg ¶
func NewStringPreAgg() *StringPreAgg
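The warning above is the classic hazard of hand-rolled serialization: marshal() and unmarshal() must visit the fields in the same order. A hypothetical illustration using encoding/binary (not the package's actual encoding):

    type preAgg struct {
        count int64
        min   string
    }

    // marshal writes count first, then min. unmarshal must decode in the
    // same order; reordering the struct fields without updating both
    // functions silently corrupts every round trip.
    func (p *preAgg) marshal(dst []byte) []byte {
        dst = binary.AppendVarint(dst, p.count) // field 1: count
        dst = append(dst, p.min...)             // field 2: min
        return dst
    }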
type TSSP2ParquetEvent ¶ added in v1.3.0
type TSSP2ParquetEvent struct {
    Event
    // contains filtered or unexported fields
}
func (*TSSP2ParquetEvent) Enable ¶ added in v1.3.0
func (e *TSSP2ParquetEvent) Enable() bool
func (*TSSP2ParquetEvent) Init ¶ added in v1.3.0
func (e *TSSP2ParquetEvent) Init(mst string, level uint16)
func (*TSSP2ParquetEvent) OnFinish ¶ added in v1.3.0
func (e *TSSP2ParquetEvent) OnFinish(ctx *EventContext)
func (*TSSP2ParquetEvent) OnInterrupt ¶ added in v1.3.0
func (e *TSSP2ParquetEvent) OnInterrupt()
func (*TSSP2ParquetEvent) OnNewFile ¶ added in v1.3.0
func (e *TSSP2ParquetEvent) OnNewFile(f TSSPFile)
func (*TSSP2ParquetEvent) OnReplaceFile ¶ added in v1.3.0
func (e *TSSP2ParquetEvent) OnReplaceFile(shardDir string, lockFile string) error
type TSSP2ParquetPlan ¶ added in v1.3.0
type TSSP2ParquetPlan struct {
    Mst    string
    Schema map[string]uint8
    Files  []string
    // contains filtered or unexported fields
}
func (*TSSP2ParquetPlan) Init ¶ added in v1.3.0
func (p *TSSP2ParquetPlan) Init(mst string, level uint16)
type TSSPFile ¶
type TSSPFile interface {
    Path() string
    Name() string
    FileName() TSSPFileName
    LevelAndSequence() (uint16, uint64)
    FileNameMerge() uint16
    FileNameExtend() uint16
    IsOrder() bool
    Ref()
    Unref()
    RefFileReader()
    UnrefFileReader()
    Stop()
    Inuse() bool
    MetaIndexAt(idx int) (*MetaIndex, error)
    MetaIndex(id uint64, tr util.TimeRange) (int, *MetaIndex, error)
    ChunkMeta(id uint64, offset int64, size, itemCount uint32, metaIdx int, ctx *ChunkMetaContext, ioPriority int) (*ChunkMeta, error)
    ReadAt(cm *ChunkMeta, segment int, dst *record.Record, decs *ReadContext, ioPriority int) (*record.Record, error)
    ReadData(offset int64, size uint32, dst *[]byte, ioPriority int) ([]byte, error)
    ReadChunkMetaData(metaIdx int, m *MetaIndex, dst []ChunkMeta, ioPriority int) ([]ChunkMeta, error)
    FileStat() *Trailer
    // FileSize gets the size of the disk space occupied by the file
    FileSize() int64
    // InMemSize gets the size of the memory occupied by the file
    InMemSize() int64
    Contains(id uint64) (bool, error)
    ContainsByTime(tr util.TimeRange) (bool, error)
    ContainsValue(id uint64, tr util.TimeRange) (bool, error)
    MinMaxTime() (int64, int64, error)
    Open() error
    Close() error
    LoadIntoMemory() error
    LoadComponents() error
    LoadIdTimes(p *IdTimePairs) error
    Rename(newName string) error
    UpdateLevel(level uint16)
    Remove() error
    FreeMemory()
    FreeFileHandle() error
    Version() uint64
    AverageChunkRows() int
    MaxChunkRows() int
    MetaIndexItemNum() int64
    GetFileReaderRef() int64
    RenameOnObs(obsName string, tmp bool, opt *obs.ObsOptions) error
    ChunkMetaCompressMode() uint8
}
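A hedged sketch of read-side usage, touching only methods declared above (real callers obtain files from TablesStore and must respect the Ref/Unref and RefFileReader/UnrefFileReader disciplines):

    // f TSSPFile: obtained from the store elsewhere
    f.Ref()
    defer f.Unref()
    minT, maxT, err := f.MinMaxTime()
    if err != nil {
        return err
    }
    fmt.Println(f.Path(), f.FileSize(), minT, maxT)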
type TSSPFileAttachedReader ¶ added in v1.2.0
type TSSPFileAttachedReader struct {
// contains filtered or unexported fields
}
func NewTSSPFileAttachedReader ¶ added in v1.2.0
func NewTSSPFileAttachedReader(files []TSSPFile, fragRanges []fragment.FragmentRanges, ctx *FileReaderContext, schema hybridqp.Options, unnest *influxql.Unnest) (*TSSPFileAttachedReader, error)
func (*TSSPFileAttachedReader) Close ¶ added in v1.2.0
func (t *TSSPFileAttachedReader) Close() error
func (*TSSPFileAttachedReader) EndSpan ¶ added in v1.2.0
func (t *TSSPFileAttachedReader) EndSpan()
func (*TSSPFileAttachedReader) GetSchema ¶ added in v1.2.0
func (t *TSSPFileAttachedReader) GetSchema() record.Schemas
func (*TSSPFileAttachedReader) Name ¶ added in v1.2.0
func (t *TSSPFileAttachedReader) Name() string
func (*TSSPFileAttachedReader) Next ¶ added in v1.2.0
func (t *TSSPFileAttachedReader) Next() (*record.Record, comm.SeriesInfoIntf, error)
func (*TSSPFileAttachedReader) NextAggData ¶ added in v1.2.0
func (*TSSPFileAttachedReader) ResetBy ¶ added in v1.2.0
func (t *TSSPFileAttachedReader) ResetBy(files []TSSPFile, fragRanges []fragment.FragmentRanges) error
func (*TSSPFileAttachedReader) SetOps ¶ added in v1.2.0
func (t *TSSPFileAttachedReader) SetOps(ops []*comm.CallOption)
func (*TSSPFileAttachedReader) SinkPlan ¶ added in v1.2.0
func (t *TSSPFileAttachedReader) SinkPlan(plan hybridqp.QueryNode)
func (*TSSPFileAttachedReader) StartSpan ¶ added in v1.2.0
func (t *TSSPFileAttachedReader) StartSpan(span *tracing.Span)
type TSSPFileDetachedReader ¶ added in v1.2.0
type TSSPFileDetachedReader struct {
// contains filtered or unexported fields
}
func NewTSSPFileDetachedReader ¶ added in v1.2.0
func NewTSSPFileDetachedReader(metaIndex []*MetaIndex, blocks [][]int, ctx *FileReaderContext, path *sparseindex.OBSFilterPath, unnest *influxql.Unnest, isSort bool, options hybridqp.Options) (*TSSPFileDetachedReader, error)
func (*TSSPFileDetachedReader) Close ¶ added in v1.2.0
func (t *TSSPFileDetachedReader) Close() error
func (*TSSPFileDetachedReader) EndSpan ¶ added in v1.2.0
func (t *TSSPFileDetachedReader) EndSpan()
func (*TSSPFileDetachedReader) GetSchema ¶ added in v1.2.0
func (t *TSSPFileDetachedReader) GetSchema() record.Schemas
func (*TSSPFileDetachedReader) Name ¶ added in v1.2.0
func (t *TSSPFileDetachedReader) Name() string
func (*TSSPFileDetachedReader) Next ¶ added in v1.2.0
func (t *TSSPFileDetachedReader) Next() (*record.Record, comm.SeriesInfoIntf, error)
func (*TSSPFileDetachedReader) NextAggData ¶ added in v1.2.0
func (*TSSPFileDetachedReader) ResetBy ¶ added in v1.2.0
func (t *TSSPFileDetachedReader) ResetBy(metaIndex []*MetaIndex, blocks [][]int, ctx *FileReaderContext)
func (*TSSPFileDetachedReader) SetOps ¶ added in v1.2.0
func (t *TSSPFileDetachedReader) SetOps(ops []*comm.CallOption)
func (*TSSPFileDetachedReader) SinkPlan ¶ added in v1.2.0
func (t *TSSPFileDetachedReader) SinkPlan(plan hybridqp.QueryNode)
func (*TSSPFileDetachedReader) StartSpan ¶ added in v1.2.0
func (t *TSSPFileDetachedReader) StartSpan(span *tracing.Span)
func (*TSSPFileDetachedReader) UpdateTime ¶ added in v1.3.0
func (t *TSSPFileDetachedReader) UpdateTime(time int64)
type TSSPFileName ¶
type TSSPFileName struct {
// contains filtered or unexported fields
}
func NewTSSPFileName ¶
func NewTSSPFileName(seq uint64, level, merge, extent uint16, order bool, lockPath *string) TSSPFileName
func (*TSSPFileName) Equal ¶
func (n *TSSPFileName) Equal(other *TSSPFileName) bool
func (*TSSPFileName) ParseFileName ¶
func (n *TSSPFileName) ParseFileName(name string) error
func (*TSSPFileName) SetExtend ¶
func (n *TSSPFileName) SetExtend(extend uint16)
func (*TSSPFileName) SetLevel ¶
func (n *TSSPFileName) SetLevel(l uint16)
func (*TSSPFileName) SetMerge ¶
func (n *TSSPFileName) SetMerge(merge uint16)
func (*TSSPFileName) SetOrder ¶
func (n *TSSPFileName) SetOrder(v bool)
func (*TSSPFileName) SetSeq ¶
func (n *TSSPFileName) SetSeq(seq uint64)
func (*TSSPFileName) String ¶
func (n *TSSPFileName) String() string
func (*TSSPFileName) TmpPath ¶
func (n *TSSPFileName) TmpPath(dir string) string
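TSSPFileName carries the sequence, level, merge and extent numbers plus the order flag that make up a TSSP file name. A hedged round-trip sketch, assuming String emits the same form ParseFileName accepts and that a nil lockPath is acceptable:

    name := NewTSSPFileName(1, 0, 0, 0, true, nil) // seq=1, level/merge/extent=0, ordered
    s := name.String()

    var parsed TSSPFileName
    if err := parsed.ParseFileName(s); err != nil {
        return err
    }
    if !parsed.Equal(&name) {
        // round trip failed
    }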
type TSSPFiles ¶
type TSSPFiles struct {
// contains filtered or unexported fields
}
func NewTSSPFiles ¶
func NewTSSPFiles() *TSSPFiles
func (*TSSPFiles) AppendReloadFiles ¶ added in v1.4.0
func (*TSSPFiles) MergedLevelCount ¶ added in v1.4.0
type TableReaders ¶
type TableReaders []TSSPFile
func (TableReaders) Len ¶
func (tables TableReaders) Len() int
func (TableReaders) Less ¶
func (tables TableReaders) Less(i, j int) bool
func (TableReaders) Swap ¶
func (tables TableReaders) Swap(i, j int)
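Len, Less and Swap make TableReaders satisfy sort.Interface, so a slice of TSSP files can be ordered with the standard sort package (the ordering criterion lives in Less; presumably file sequence):

    files := []TSSPFile{ /* obtained elsewhere */ }
    tables := TableReaders(files)
    sort.Sort(tables)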
type TableStoreGC ¶
type TableStoreGC struct {
// contains filtered or unexported fields
}
func (*TableStoreGC) Add ¶
func (sgc *TableStoreGC) Add(files ...TSSPFile)
func (*TableStoreGC) GC ¶
func (sgc *TableStoreGC) GC()
type TablesGC ¶
type TablesGC interface {
    Add(files ...TSSPFile)
    GC()
}
func NewTableStoreGC ¶
func NewTableStoreGC() TablesGC
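NewTableStoreGC returns the TableStoreGC implementation behind the TablesGC interface. A minimal sketch; whether GC blocks in a loop or performs a single pass is not documented here, so running it in a goroutine is an assumption:

    gc := NewTableStoreGC()
    gc.Add(file1, file2) // queue files for garbage collection
    go gc.GC()           // assumption: GC runs the collection loop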
type TablesStore ¶
type TablesStore interface {
    SetOpId(shardId uint64, opId uint64)
    Open() (int64, error)
    Close() error
    AddTable(ms *MsBuilder, isOrder bool, tmp bool)
    AddTSSPFiles(name string, isOrder bool, f ...TSSPFile)
    AddBothTSSPFiles(flushed *bool, name string, orderFiles []TSSPFile, unorderFiles []TSSPFile)
    AddPKFile(name, file string, rec *record.Record, mark fragment.IndexFragment, tcLocation int8)
    GetPKFile(mstName string, file string) (pkInfo *colstore.PKInfo, ok bool)
    FreeAllMemReader()
    ReplaceFiles(name string, oldFiles, newFiles []TSSPFile, isOrder bool) error
    GetBothFilesRef(measurement string, hasTimeFilter bool, tr util.TimeRange, flushed *bool) ([]TSSPFile, []TSSPFile, bool)
    ReplaceDownSampleFiles(mstNames []string, originFiles [][]TSSPFile, newFiles [][]TSSPFile, isOrder bool, callBack func()) error
    NextSequence() uint64
    Sequencer() *Sequencer
    GetTSSPFiles(mm string, isOrder bool) (*TSSPFiles, bool)
    GetCSFiles(mm string) (*TSSPFiles, bool)
    CopyCSFiles(name string) []TSSPFile
    Tier() uint64
    SetTier(tier uint64)
    File(name string, namePath string, isOrder bool) TSSPFile
    CompactDone(seq []string)
    CompactionEnable()
    CompactionDisable()
    MergeEnable()
    MergeDisable()
    CompactionEnabled() bool
    MergeEnabled() bool
    IsOutOfOrderFilesExist() bool
    MergeOutOfOrder(shId uint64, full bool, force bool) error
    LevelCompact(level uint16, shid uint64) error
    FullCompact(shid uint64) error
    SetAddFunc(addFunc func(int64))
    LoadSequencer()
    GetRowCountsBySid(measurement string, sid uint64) (int64, error)
    AddRowCountsBySid(measurement string, sid uint64, rowCounts int64)
    GetOutOfOrderFileNum() int
    GetTableFileNum(string, bool) int
    GetMstFileStat() *stats.FileStat
    DropMeasurement(ctx context.Context, name string) error
    GetFileSeq() uint64
    DisableCompAndMerge()
    EnableCompAndMerge()
    FreeSequencer() bool
    SetImmTableType(engineType config.EngineType)
    SetMstInfo(name string, mstInfo *meta.MeasurementInfo)
    SetAccumulateMetaIndex(name string, aMetaIndex *AccumulateMetaIndex)
    GetMstInfo(name string) (*meta.MeasurementInfo, bool)
    SeriesTotal() uint64
    SetLockPath(lock *string)
    FullyCompacted() bool
    SetObsOption(option *obs.ObsOptions)
    GetObsOption() *obs.ObsOptions
    GetShardID() uint64
    SetIndexMergeSet(idx IndexMergeSet)
    GetAllMstList() []string
}
type TagValuesIteratorHandler ¶ added in v1.3.0
type TagValuesIteratorHandler struct {
// contains filtered or unexported fields
}
func (*TagValuesIteratorHandler) Begin ¶ added in v1.3.0
func (h *TagValuesIteratorHandler) Begin()
func (*TagValuesIteratorHandler) Finish ¶ added in v1.3.0
func (h *TagValuesIteratorHandler) Finish()
func (*TagValuesIteratorHandler) Init ¶ added in v1.3.0
func (h *TagValuesIteratorHandler) Init(param map[string]interface{}) error
func (*TagValuesIteratorHandler) Limited ¶ added in v1.3.0
func (h *TagValuesIteratorHandler) Limited() bool
func (*TagValuesIteratorHandler) NextChunkMeta ¶ added in v1.3.0
func (h *TagValuesIteratorHandler) NextChunkMeta(cm *ChunkMeta) error
func (*TagValuesIteratorHandler) NextFile ¶ added in v1.3.0
func (h *TagValuesIteratorHandler) NextFile(TSSPFile)
type TimePreAgg ¶
type TimePreAgg struct {
// contains filtered or unexported fields
}
func NewTimePreAgg ¶
func NewTimePreAgg() *TimePreAgg
type Trailer ¶
type Trailer struct {
    TableStat
    // contains filtered or unexported fields
}
func (*Trailer) ContainsId ¶
func (*Trailer) MetaIndexItemNum ¶ added in v1.0.0
func (*Trailer) MetaIndexSize ¶ added in v1.2.0
func (*Trailer) SetChunkMetaCompressFlag ¶ added in v1.2.0
func (t *Trailer) SetChunkMetaCompressFlag()
type TsChunkDataImp ¶ added in v1.2.0
type TsChunkDataImp struct{}
func (*TsChunkDataImp) EncodeChunk ¶ added in v1.2.0
func (t *TsChunkDataImp) EncodeChunk(b *ChunkDataBuilder, id uint64, offset int64, rec *record.Record, dst []byte, timeSorted bool) ([]byte, error)
func (*TsChunkDataImp) EncodeChunkForCompaction ¶ added in v1.2.0
func (t *TsChunkDataImp) EncodeChunkForCompaction(b *ChunkDataBuilder, offset int64, rec *record.Record, dst []byte, accumulateRowsIndex []int, timeSorted bool) ([]byte, error)
func (*TsChunkDataImp) SetAccumulateRowsIndex ¶ added in v1.2.0
func (t *TsChunkDataImp) SetAccumulateRowsIndex(accumulateRowsIndex []int)
func (*TsChunkDataImp) SetDetachedInfo ¶ added in v1.2.0
func (t *TsChunkDataImp) SetDetachedInfo(writeDetached bool)
type UnnestOperator ¶ added in v1.3.0
func GetUnnestFuncOperator ¶ added in v1.3.0
func NewMatchAllOperator ¶ added in v1.3.0
func NewMatchAllOperator(unnest *influxql.Unnest, schemas record.Schemas) UnnestOperator
type UnorderedColumn ¶ added in v1.3.0
type UnorderedColumn struct {
// contains filtered or unexported fields
}
func (*UnorderedColumn) Init ¶ added in v1.3.0
func (c *UnorderedColumn) Init(meta *ColumnMeta, idx int)
type UnorderedColumnReader ¶ added in v1.0.0
type UnorderedColumnReader struct {
// contains filtered or unexported fields
}
func (*UnorderedColumnReader) ChangeColumn ¶ added in v1.3.0
func (r *UnorderedColumnReader) ChangeColumn(sid uint64, ref *record.Field)
func (*UnorderedColumnReader) ChangeSeries ¶ added in v1.3.0
func (r *UnorderedColumnReader) ChangeSeries(sid uint64) error
func (*UnorderedColumnReader) Close ¶ added in v1.3.0
func (r *UnorderedColumnReader) Close()
func (*UnorderedColumnReader) HasColumn ¶ added in v1.3.0
func (r *UnorderedColumnReader) HasColumn() bool
func (*UnorderedColumnReader) MatchSeries ¶ added in v1.3.0
func (r *UnorderedColumnReader) MatchSeries(sid uint64) bool
func (*UnorderedColumnReader) ReadSchemas ¶ added in v1.3.0
type UnorderedColumns ¶ added in v1.3.0
type UnorderedColumns struct {
// contains filtered or unexported fields
}
func NewUnorderedColumns ¶ added in v1.3.0
func NewUnorderedColumns() *UnorderedColumns
func (*UnorderedColumns) ChangeColumn ¶ added in v1.3.0
func (c *UnorderedColumns) ChangeColumn(name string) *UnorderedColumn
func (*UnorderedColumns) GetLineOffset ¶ added in v1.3.0
func (c *UnorderedColumns) GetLineOffset(name string) int
func (*UnorderedColumns) GetSegOffset ¶ added in v1.3.0
func (c *UnorderedColumns) GetSegOffset(name string) int
func (*UnorderedColumns) IncrLineOffset ¶ added in v1.3.0
func (c *UnorderedColumns) IncrLineOffset(name string, n int)
func (*UnorderedColumns) IncrSegOffset ¶ added in v1.3.0
func (c *UnorderedColumns) IncrSegOffset(name string, n int)
func (*UnorderedColumns) Init ¶ added in v1.3.0
func (c *UnorderedColumns) Init(cm *ChunkMeta)
func (*UnorderedColumns) ReadCompleted ¶ added in v1.3.0
func (c *UnorderedColumns) ReadCompleted() bool
func (*UnorderedColumns) SetRemainLine ¶ added in v1.3.0
func (c *UnorderedColumns) SetRemainLine(n int)
func (*UnorderedColumns) TimeMeta ¶ added in v1.3.0
func (c *UnorderedColumns) TimeMeta() *ColumnMeta
func (*UnorderedColumns) Walk ¶ added in v1.3.0
func (c *UnorderedColumns) Walk(callback func(meta *ColumnMeta))
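UnorderedColumns tracks per-column read progress within one chunk. A hedged sketch of inspecting a chunk via Init and Walk (the offset bookkeeping through GetLineOffset/IncrLineOffset is driven by the reader and omitted here):

    cols := NewUnorderedColumns()
    cols.Init(cm) // cm *ChunkMeta, obtained elsewhere
    cols.Walk(func(meta *ColumnMeta) {
        // inspect each column meta of the chunk
    })
    if cols.ReadCompleted() {
        // every column has been fully consumed
    }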
type UnorderedReader ¶ added in v1.0.0
type UnorderedReader struct {
// contains filtered or unexported fields
}
func NewUnorderedReader ¶ added in v1.0.0
func NewUnorderedReader(log *logger.Logger) *UnorderedReader
func (*UnorderedReader) AddFiles ¶ added in v1.0.0
func (r *UnorderedReader) AddFiles(files []TSSPFile)
func (*UnorderedReader) AllocNilCol ¶ added in v1.1.0
func (*UnorderedReader) ChangeColumn ¶ added in v1.3.0
func (r *UnorderedReader) ChangeColumn(sid uint64, ref *record.Field)
func (*UnorderedReader) ChangeSeries ¶ added in v1.3.0
func (r *UnorderedReader) ChangeSeries(sid uint64) error
func (*UnorderedReader) Close ¶ added in v1.0.0
func (r *UnorderedReader) Close()
func (*UnorderedReader) CloseFile ¶ added in v1.3.0
func (r *UnorderedReader) CloseFile()
func (*UnorderedReader) HasSeries ¶ added in v1.1.0
func (r *UnorderedReader) HasSeries(sid uint64) bool
func (*UnorderedReader) InitTimes ¶ added in v1.0.1
func (r *UnorderedReader) InitTimes(sid uint64, maxTime int64)
InitTimes initializes the time column of unordered data.
func (*UnorderedReader) Read ¶ added in v1.0.0
Read reads data based on the series ID, column, and time range.
func (*UnorderedReader) ReadAllTimes ¶ added in v1.0.1
func (r *UnorderedReader) ReadAllTimes() []int64
func (*UnorderedReader) ReadRemain ¶ added in v1.0.0
func (r *UnorderedReader) ReadRemain(sid uint64, cb remainCallback) error
func (*UnorderedReader) ReadSeriesSchemas ¶ added in v1.0.0
func (r *UnorderedReader) ReadSeriesSchemas(sid uint64, maxTime int64) record.Schemas
func (*UnorderedReader) ReadTimes ¶ added in v1.0.0
func (r *UnorderedReader) ReadTimes(maxTime int64) []int64
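Building on the doc lines above, a hedged end-to-end sketch of reading the time column of unordered data; the call order is an assumption based on the method names, and error handling is trimmed:

    r := NewUnorderedReader(log) // log *logger.Logger, from the caller
    r.AddFiles(files)            // files []TSSPFile
    if r.HasSeries(sid) {
        r.InitTimes(sid, maxTime)     // prepare the time column up to maxTime
        times := r.ReadTimes(maxTime) // times []int64
        _ = times
    }
    r.Close()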
type UnorderedReaderContext ¶ added in v1.1.0
type UnorderedReaderContext struct {
// contains filtered or unexported fields
}
Source Files ¶
- chunk_iterators.go
- chunk_meta_cache.go
- chunk_meta_codec.go
- chunkdata_builder.go
- chunkdata_builder_cs.go
- chunkdata_builder_ts.go
- colstore_compact.go
- column_builder.go
- column_iterator.go
- compact.go
- compaction_file_info.go
- config.go
- cs_mms_tables.go
- detached_cache.go
- detached_chunkmeta.go
- detached_meta_index.go
- detached_metadata.go
- detached_pk_data.go
- detached_pk_meta.go
- detached_pk_meta_info.go
- event_bus.go
- evict.go
- file_iterator.go
- first_last_reader.go
- hot.go
- index_swapper.go
- index_writer.go
- limiter.go
- location.go
- location_cursor.go
- merge_out_of_order.go
- merge_performer.go
- merge_self.go
- merge_tool.go
- merge_util.go
- meta_data.go
- mms_loader.go
- mms_tables.go
- msbuilder.go
- pre_aggregation.go
- read_context.go
- reader.go
- segment_sequence_reader.go
- sequence_iterator.go
- sequencer.go
- show_series.go
- show_tag_values.go
- sort_limit_cursor.go
- sort_limit_row_heap.go
- stream_compact.go
- stream_downsample.go
- table.go
- table_stat.go
- task.go
- task_cs_parquet.go
- task_parquet.go
- trailer.go
- ts_mms_tables.go
- tssp_file.go
- tssp_file_attached_reader.go
- tssp_file_detached_reader.go
- tssp_file_meta.go
- tssp_file_name.go
- tssp_file_page_cache_reader.go
- tssp_reader.go
- unnest_func.go
- unordered_reader.go
- util.go
- writer.go