Documentation ¶
Overview ¶
Package pilosa implements the core of the Pilosa distributed bitmap index. It contains all the domain objects, interfaces, and logic that define Pilosa.
Index ¶
- Constants
- Variables
- func AlwaysPrintf(format string, a ...interface{})
- func BitmapAsString(rbm *roaring.Bitmap) (r string)
- func CPUProfileForDur(dur time.Duration, outpath string)
- func Caller(upStack int) string
- func CompareTransactions(t1, t2 *Transaction) error
- func DecodeAttrs(v []byte) (map[string]interface{}, error)
- func DirExists(name string) bool
- func DiskUse(root string, requiredSuffix string) (tot int, err error)
- func DumpAllBolt()
- func EncodeAttrs(attr map[string]interface{}) ([]byte, error)
- func FileExists(name string) bool
- func FileLine(depth int) string
- func FileSize(name string) (int64, error)
- func FormatQualifiedFieldName(index, field string) string
- func FormatQualifiedFragmentName(index, field, view string, shard uint64) string
- func FormatQualifiedIndexName(index string) string
- func FormatQualifiedViewName(index, field, view string) string
- func GenerateNextPartitionedID(index string, prev uint64, partitionID, partitionN int) uint64
- func GenericApplyFilter(tx Tx, index, field, view string, shard uint64, ckey uint64, ...) (err error)
- func GetAvailPort() int
- func LatticeVersionInfo() string
- func MarshalInternalMessage(m Message, s Serializer) ([]byte, error)
- func MemProfileForDur(dur time.Duration, outpath string)
- func MustTxsrcToTxtype(txsrc string) (types []txtype)
- func NewBlueGreenIterator(tx *blueGreenTx, ait, bit roaring.ContainerIterator) *blueGreenIterator
- func NewNopAuditor() testhook.Auditor
- func NewRankCache(maxEntries uint32) *rankCache
- func NilInside(iface interface{}) bool
- func OpenIDAllocator(path string) (*idAllocator, error)
- func OptAPIImportWorkerPoolSize(size int) apiOption
- func OptAPIServer(s *Server) apiOption
- func PP(format string, a ...interface{})
- func PanicOn(err error)
- func ParseQualifiedFragmentName(name string) (index, field, view string, shard uint64, err error)
- func Printf(format string, a ...interface{}) (n int, err error)
- func RoaringFragmentChecksum(path string, index, field, view string, shard uint64) (r string, hotbits int)
- func SubdirLargestDirWithSuffix(rootDir, requiredDirSuffix string) (exists bool, largestSize int, err error)
- func TSPrintf(format string, a ...interface{})
- func ToContainer(typ byte, w []byte) (c *roaring.Container)
- func VV(format string, a ...interface{})
- func VersionInfo() string
- type API
- func (api *API) ActiveQueries(ctx context.Context) ([]ActiveQueryStatus, error)
- func (api *API) ApplySchema(ctx context.Context, s *Schema, remote bool) error
- func (api *API) AvailableShardsByIndex(ctx context.Context) map[string]*roaring.Bitmap
- func (api *API) Close() error
- func (api *API) ClusterMessage(ctx context.Context, reqBody io.Reader) error
- func (api *API) ClusterName() string
- func (api *API) CommitIDs(key IDAllocKey, session [32]byte, count uint64) error
- func (api *API) CreateField(ctx context.Context, indexName string, fieldName string, opts ...FieldOption) (*Field, error)
- func (api *API) CreateFieldKeys(ctx context.Context, index, field string, keys ...string) (map[string]uint64, error)
- func (api *API) CreateIndex(ctx context.Context, indexName string, options IndexOptions) (*Index, error)
- func (api *API) CreateIndexKeys(ctx context.Context, index string, keys ...string) (map[string]uint64, error)
- func (api *API) DeleteAvailableShard(_ context.Context, indexName, fieldName string, shardID uint64) error
- func (api *API) DeleteField(ctx context.Context, indexName string, fieldName string) error
- func (api *API) DeleteIndex(ctx context.Context, indexName string) error
- func (api *API) DeleteView(ctx context.Context, indexName string, fieldName string, viewName string) error
- func (api *API) ExportCSV(ctx context.Context, indexName string, fieldName string, shard uint64, ...) error
- func (api *API) Field(ctx context.Context, indexName, fieldName string) (*Field, error)
- func (api *API) FieldAttrDiff(ctx context.Context, indexName string, fieldName string, blocks []AttrBlock) (map[uint64]map[string]interface{}, error)
- func (api *API) FindFieldKeys(ctx context.Context, index, field string, keys ...string) (map[string]uint64, error)
- func (api *API) FindIndexKeys(ctx context.Context, index string, keys ...string) (map[string]uint64, error)
- func (api *API) FinishTransaction(ctx context.Context, id string, remote bool) (*Transaction, error)
- func (api *API) FragmentBlockData(ctx context.Context, body io.Reader) (_ []byte, err error)
- func (api *API) FragmentBlocks(ctx context.Context, indexName, fieldName, viewName string, shard uint64) ([]FragmentBlock, error)
- func (api *API) FragmentData(ctx context.Context, indexName, fieldName, viewName string, shard uint64) (io.WriterTo, error)
- func (api *API) GetTransaction(ctx context.Context, id string, remote bool) (*Transaction, error)
- func (api *API) GetTranslateEntryReader(ctx context.Context, offsets TranslateOffsetMap) (_ TranslateEntryReader, err error)
- func (api *API) Holder() *Holder
- func (api *API) HostStates(ctx context.Context) map[string]string
- func (api *API) Hosts(ctx context.Context) []*Node
- func (api *API) Import(ctx context.Context, qcx *Qcx, req *ImportRequest, opts ...ImportOption) (err error)
- func (api *API) ImportAtomicRecord(ctx context.Context, qcx *Qcx, req *AtomicRecord, opts ...ImportOption) error
- func (api *API) ImportColumnAttrs(ctx context.Context, req *ImportColumnAttrsRequest, opts ...ImportOption) error
- func (api *API) ImportRoaring(ctx context.Context, indexName, fieldName string, shard uint64, remote bool, ...) (err0 error)
- func (api *API) ImportValue(ctx context.Context, qcx *Qcx, req *ImportValueRequest, opts ...ImportOption) error
- func (api *API) ImportValueWithTx(ctx context.Context, qcx *Qcx, req *ImportValueRequest, opts ...ImportOption) (err0 error)
- func (api *API) ImportWithTx(ctx context.Context, qcx *Qcx, req *ImportRequest, opts ...ImportOption) error
- func (api *API) Index(ctx context.Context, indexName string) (*Index, error)
- func (api *API) IndexAttrDiff(ctx context.Context, indexName string, blocks []AttrBlock) (map[uint64]map[string]interface{}, error)
- func (api *API) Info() serverInfo
- func (api *API) Inspect(ctx context.Context, req *InspectRequest) (*HolderInfo, error)
- func (api *API) LatticeVersion() string
- func (api *API) LongQueryTime() time.Duration
- func (api *API) MaxShards(ctx context.Context) map[string]uint64
- func (api *API) Node() *Node
- func (api *API) PastQueries(ctx context.Context, remote bool) ([]PastQueryStatus, error)
- func (api *API) PrimaryReplicaNodeURL() url.URL
- func (api *API) Query(ctx context.Context, req *QueryRequest) (QueryResponse, error)
- func (api *API) RecalculateCaches(ctx context.Context) error
- func (api *API) RemoveNode(id string) (*Node, error)
- func (api *API) ReserveIDs(key IDAllocKey, session [32]byte, offset uint64, count uint64) ([]IDRange, error)
- func (api *API) ResetIDAlloc(index string) error
- func (api *API) ResizeAbort() error
- func (api *API) Schema(ctx context.Context) []*IndexInfo
- func (api *API) SchemaDetails(ctx context.Context) ([]*IndexInfo, error)
- func (api *API) SetCoordinator(ctx context.Context, id string) (oldNode, newNode *Node, err error)
- func (api *API) ShardDistribution(ctx context.Context) map[string]interface{}
- func (api *API) ShardNodes(ctx context.Context, indexName string, shard uint64) ([]*Node, error)
- func (api *API) StartTransaction(ctx context.Context, id string, timeout time.Duration, exclusive bool, ...) (*Transaction, error)
- func (api *API) State() string
- func (api *API) StatsWithTags(tags []string) stats.StatsClient
- func (api *API) Transactions(ctx context.Context) (map[string]*Transaction, error)
- func (api *API) TranslateData(ctx context.Context, indexName string, partition int) (io.WriterTo, error)
- func (api *API) TranslateFieldDB(ctx context.Context, indexName, fieldName string, rd io.Reader) error
- func (api *API) TranslateIDs(ctx context.Context, r io.Reader) (_ []byte, err error)
- func (api *API) TranslateIndexDB(ctx context.Context, indexName string, partitionID int, rd io.Reader) error
- func (api *API) TranslateIndexIDs(ctx context.Context, indexName string, ids []uint64) ([]string, error)
- func (api *API) TranslateIndexKey(ctx context.Context, indexName string, key string, writable bool) (uint64, error)
- func (api *API) TranslateKeys(ctx context.Context, r io.Reader) (_ []byte, err error)
- func (api *API) Txf() *TxFactory
- func (api *API) Usage(ctx context.Context, remote bool) (map[string]NodeUsage, error)
- func (api *API) Version() string
- func (api *API) Views(ctx context.Context, indexName string, fieldName string) ([]*view, error)
- type ActiveQueryStatus
- type AllTranslatorSummary
- type AtomicRecord
- type AttrBlock
- type AttrStore
- type BadRequestError
- type Barrier
- type Bit
- type BitmapLikeFilter
- type BlockDataRequest
- type BlockDataResponse
- type BoltIterator
- func (bi *BoltIterator) Close()
- func (bi *BoltIterator) Next() (ok bool)
- func (bi *BoltIterator) Seek(needle []byte) (ok bool)
- func (bi *BoltIterator) String() (r string)
- func (bi *BoltIterator) Valid() bool
- func (bi *BoltIterator) ValidForPrefix(prefix []byte) bool
- func (bi *BoltIterator) Value() (containerKey uint64, c *roaring.Container)
- type BoltTx
- func (tx *BoltTx) Add(index, field, view string, shard uint64, batched bool, a ...uint64) (changeCount int, err error)
- func (c *BoltTx) ApplyFilter(index, field, view string, shard uint64, ckey uint64, ...) (err error)
- func (tx *BoltTx) Commit() error
- func (tx *BoltTx) Container(index, field, view string, shard uint64, ckey uint64) (c *roaring.Container, err error)
- func (tx *BoltTx) ContainerIterator(index, field, view string, shard uint64, firstRoaringContainerKey uint64) (citer roaring.ContainerIterator, found bool, err error)
- func (tx *BoltTx) Contains(index, field, view string, shard uint64, key uint64) (exists bool, err error)
- func (tx *BoltTx) Count(index, field, view string, shard uint64) (uint64, error)
- func (tx *BoltTx) CountRange(index, field, view string, shard uint64, start, end uint64) (n uint64, err error)
- func (tx *BoltTx) Dump(short bool, shard uint64)
- func (tx *BoltTx) ForEach(index, field, view string, shard uint64, fn func(i uint64) error) error
- func (tx *BoltTx) ForEachRange(index, field, view string, shard uint64, start, end uint64, ...) error
- func (tx *BoltTx) GetFieldSizeBytes(index, field string) (uint64, error)
- func (tx *BoltTx) GetSortedFieldViewList(idx *Index, shard uint64) (fvs []short_txkey.FieldView, err error)
- func (tx *BoltTx) Group() *TxGroup
- func (tx *BoltTx) ImportRoaringBits(index, field, view string, shard uint64, itr roaring.RoaringIterator, ...) (changed int, rowSet map[uint64]int, err error)
- func (tx *BoltTx) IncrementOpN(index, field, view string, shard uint64, changedN int)
- func (tx *BoltTx) IsDone() (done bool)
- func (tx *BoltTx) Max(index, field, view string, shard uint64) (uint64, error)
- func (tx *BoltTx) Min(index, field, view string, shard uint64) (uint64, bool, error)
- func (tx *BoltTx) NewTxIterator(index, field, view string, shard uint64) *roaring.Iterator
- func (tx *BoltTx) OffsetRange(index, field, view string, shard, offset, start, endx uint64) (other *roaring.Bitmap, err error)
- func (tx *BoltTx) Options() Txo
- func (tx *BoltTx) Pointer() string
- func (tx *BoltTx) PutContainer(index, field, view string, shard uint64, ckey uint64, rc *roaring.Container) error
- func (tx *BoltTx) Readonly() bool
- func (tx *BoltTx) Remove(index, field, view string, shard uint64, a ...uint64) (changeCount int, err error)
- func (tx *BoltTx) RemoveContainer(index, field, view string, shard uint64, ckey uint64) error
- func (tx *BoltTx) RoaringBitmap(index, field, view string, shard uint64) (*roaring.Bitmap, error)
- func (tx *BoltTx) RoaringBitmapReader(index, field, view string, shard uint64, fragmentPathForRoaring string) (r io.ReadCloser, sz int64, err error)
- func (tx *BoltTx) Rollback()
- func (tx *BoltTx) Sn() int64
- func (tx *BoltTx) Type() string
- func (tx *BoltTx) UnionInPlace(index, field, view string, shard uint64, others ...*roaring.Bitmap) error
- func (tx *BoltTx) UseRowCache() bool
- type BoltWrapper
- func (w *BoltWrapper) CleanupTx(tx Tx)
- func (w *BoltWrapper) Close() (err error)
- func (w *BoltWrapper) DeleteDBPath(dbs *DBShard) (err error)
- func (w *BoltWrapper) DeleteField(index, field, fieldPath string) (err error)
- func (w *BoltWrapper) DeleteFragment(index, field, view string, shard uint64, frag interface{}) error
- func (w *BoltWrapper) DeleteIndex(indexName string) error
- func (w *BoltWrapper) DeletePrefix(prefix []byte) error
- func (w *BoltWrapper) HasData() (has bool, err error)
- func (w *BoltWrapper) IsClosed() (closed bool)
- func (w *BoltWrapper) NewTx(write bool, initialIndexName string, o Txo) (tx Tx, err error)
- func (w *BoltWrapper) NewTxREAD() (*bolt.Tx, error)
- func (w *BoltWrapper) NewTxWRITE() (*bolt.Tx, error)
- func (w *BoltWrapper) OpenListString() (r string)
- func (w *BoltWrapper) OpenSnList() (slc []int64)
- func (w *BoltWrapper) Path() string
- func (w *BoltWrapper) SetHolder(h *Holder)
- func (w *BoltWrapper) StringifiedBoltKeys(optionalUseThisTx Tx, short bool) (r string)
- type Closer
- type ClusterStatus
- type CmdIO
- type ColumnAttrSet
- type ConflictError
- type CreateFieldMessage
- type CreateIndexMessage
- type CreateShardMessage
- type CreateViewMessage
- type DBHolder
- type DBIndex
- type DBPerShard
- func (per *DBPerShard) Close() (err error)
- func (per *DBPerShard) Del(dbs *DBShard) (err error)
- func (per *DBPerShard) DeleteFieldFromStore(index, field, fieldPath string) (err error)
- func (per *DBPerShard) DeleteFragment(index, field, view string, shard uint64, frag *fragment) error
- func (per *DBPerShard) DeleteIndex(index string) (err error)
- func (per *DBPerShard) DumpAll()
- func (per *DBPerShard) GetDBShard(index string, shard uint64, idx *Index) (dbs *DBShard, err error)
- func (per *DBPerShard) GetFieldView2ShardsMapForIndex(idx *Index) (vs *FieldView2Shards, err error)
- func (per *DBPerShard) HasData(which int) (hasData bool, err error)
- func (per *DBPerShard) ListOpenString() (r string)
- func (per *DBPerShard) LoadExistingDBs() (err error)
- func (per *DBPerShard) RoaringHasData() (bool, error)
- func (per *DBPerShard) TypedDBPerShardGetShardsForIndex(ty txtype, idx *Index, roaringViewPath string, requireData bool) (shardMap map[uint64]bool, err error)
- type DBRegistry
- type DBShard
- func (dbs *DBShard) AllFieldViews() (fvs []txkey.FieldView, err error)
- func (dbs *DBShard) Cleanup(tx Tx)
- func (dbs *DBShard) Close() (err error)
- func (dbs *DBShard) DeleteDBPath() (err error)
- func (dbs *DBShard) DeleteFieldFromStore(index, field, fieldPath string) (err error)
- func (dbs *DBShard) DeleteFragment(index, field, view string, shard uint64, frag interface{}) (err error)
- func (dbs *DBShard) DumpAll()
- func (dbs *DBShard) HolderString() string
- func (dbs *DBShard) NewTx(write bool, initialIndexName string, o Txo) (tx Tx, err error)
- type DBWrapper
- type DeleteAvailableShardMessage
- type DeleteFieldMessage
- type DeleteIndexMessage
- type DeleteViewMessage
- type DiskUsage
- type Dumper
- type Error
- type ExtractedIDColumn
- type ExtractedIDMatrix
- type ExtractedTable
- type ExtractedTableColumn
- type ExtractedTableField
- type Field
- func (f *Field) AddRemoteAvailableShards(b *roaring.Bitmap) error
- func (f *Field) AvailableShards(localOnly bool) *roaring.Bitmap
- func (f *Field) CacheSize() uint32
- func (f *Field) ClearBit(tx Tx, rowID, colID uint64) (changed bool, err error)
- func (f *Field) ClearValue(tx Tx, columnID uint64) (changed bool, err error)
- func (f *Field) Close() error
- func (f *Field) CreatedAt() int64
- func (f *Field) ForeignIndex() string
- func (f *Field) GetIndex() *Index
- func (f *Field) Import(qcx *Qcx, rowIDs, columnIDs []uint64, timestamps []*time.Time, ...) (err0 error)
- func (f *Field) Index() string
- func (f *Field) Keys() bool
- func (f *Field) LocalAvailableShards() *roaring.Bitmap
- func (f *Field) MaxForShard(tx Tx, shard uint64, filter *Row) (ValCount, error)
- func (f *Field) MinForShard(tx Tx, shard uint64, filter *Row) (ValCount, error)
- func (f *Field) Name() string
- func (f *Field) Open() error
- func (f *Field) Options() FieldOptions
- func (f *Field) Path() string
- func (f *Field) Range(qcx *Qcx, name string, op pql.Token, predicate int64) (*Row, error)
- func (f *Field) RemoveAvailableShard(v uint64) error
- func (f *Field) Row(tx Tx, rowID uint64) (*Row, error)
- func (f *Field) RowAttrStore() AttrStore
- func (f *Field) RowTime(tx Tx, rowID uint64, time time.Time, quantum string) (*Row, error)
- func (f *Field) SetBit(tx Tx, rowID, colID uint64, t *time.Time) (changed bool, err error)
- func (f *Field) SetCacheSize(v uint32) error
- func (f *Field) SetValue(tx Tx, columnID uint64, value int64) (changed bool, err error)
- func (f *Field) StringValue(tx Tx, columnID uint64) (value string, exists bool, err error)
- func (f *Field) TimeQuantum() TimeQuantum
- func (f *Field) TranslateStore() TranslateStore
- func (f *Field) TranslateStorePath() string
- func (f *Field) Type() string
- func (f *Field) Value(tx Tx, columnID uint64) (value int64, exists bool, err error)
- type FieldInfo
- type FieldOption
- func OptFieldForeignIndex(index string) FieldOption
- func OptFieldKeys() FieldOption
- func OptFieldTypeBool() FieldOption
- func OptFieldTypeDecimal(scale int64, minmax ...pql.Decimal) FieldOption
- func OptFieldTypeDefault() FieldOption
- func OptFieldTypeInt(min, max int64) FieldOption
- func OptFieldTypeMutex(cacheType string, cacheSize uint32) FieldOption
- func OptFieldTypeSet(cacheType string, cacheSize uint32) FieldOption
- func OptFieldTypeTime(timeQuantum TimeQuantum, opt ...bool) FieldOption
- type FieldOptions
- type FieldRow
- type FieldStatus
- type FieldUsage
- type FieldValue
- type FieldView2Shards
- type FileSystem
- type FragSum
- type FragmentBlock
- type FragmentInfo
- type GCNotifier
- type GroupCount
- type GroupCounts
- type Handler
- type Hasher
- type Holder
- func (h *Holder) Activate()
- func (h *Holder) BeginTx(writable bool, idx *Index, shard uint64) (Tx, error)
- func (h *Holder) Close() error
- func (h *Holder) CreateIndex(name string, opt IndexOptions) (*Index, error)
- func (h *Holder) CreateIndexIfNotExists(name string, opt IndexOptions) (*Index, error)
- func (h *Holder) DeleteIndex(name string) error
- func (h *Holder) DumpAllShards()
- func (h *Holder) Field(index, name string) *Field
- func (h *Holder) FinishTransaction(ctx context.Context, id string) (*Transaction, error)
- func (h *Holder) GetTransaction(ctx context.Context, id string) (*Transaction, error)
- func (h *Holder) HasData() (bool, error)
- func (h *Holder) HasRoaringData() (has bool, err error)
- func (h *Holder) HolderPathFromIndexPath(indexPath, indexName string) string
- func (h *Holder) Index(name string) (idx *Index)
- func (h *Holder) IndexPath(name string) string
- func (h *Holder) Indexes() []*Index
- func (h *Holder) Inspect(ctx context.Context, req *InspectRequest) (*HolderInfo, error)
- func (h *Holder) LoadNodeID() (string, error)
- func (h *Holder) NeedsSnapshot() bool
- func (h *Holder) Open() error
- func (h *Holder) Path() string
- func (h *Holder) Process(ctx context.Context, op HolderOperator) (err error)
- func (h *Holder) Schema(includeHiddenAndViews bool) []*IndexInfo
- func (h *Holder) StartTransaction(ctx context.Context, id string, timeout time.Duration, exclusive bool) (*Transaction, error)
- func (h *Holder) Transactions(ctx context.Context) (map[string]*Transaction, error)
- func (h *Holder) Txf() *TxFactory
- type HolderConfig
- type HolderFilter
- type HolderFilterAll
- type HolderFilterParams
- type HolderInfo
- type HolderOperator
- type HolderOpts
- type HolderProcess
- type HolderProcessNone
- type IDAllocCommitRequest
- type IDAllocKey
- type IDAllocReserveRequest
- type IDRange
- type ImportColumnAttrsRequest
- type ImportOption
- type ImportOptions
- type ImportRequest
- type ImportResponse
- type ImportRoaringRequest
- type ImportValueRequest
- type InMemTransactionStore
- type InMemTranslateStore
- func (s *InMemTranslateStore) Close() error
- func (s *InMemTranslateStore) ComputeTranslatorSummaryCols(partitionID int, topo *Topology) (sum *TranslatorSummary, err error)
- func (s *InMemTranslateStore) ComputeTranslatorSummaryRows() (sum *TranslatorSummary, err error)
- func (s *InMemTranslateStore) CreateKeys(keys ...string) (map[string]uint64, error)
- func (s *InMemTranslateStore) EntryReader(ctx context.Context, offset uint64) (TranslateEntryReader, error)
- func (s *InMemTranslateStore) FindKeys(keys ...string) (map[string]uint64, error)
- func (s *InMemTranslateStore) ForceSet(id uint64, key string) error
- func (s *InMemTranslateStore) GetStorePath() string
- func (s *InMemTranslateStore) IDWalker(walk func(key string, col uint64)) error
- func (s *InMemTranslateStore) KeyWalker(walk func(key string, col uint64)) error
- func (s *InMemTranslateStore) MaxID() (uint64, error)
- func (s *InMemTranslateStore) PartitionID() int
- func (s *InMemTranslateStore) ReadFrom(r io.Reader) (count int64, err error)
- func (s *InMemTranslateStore) ReadOnly() bool
- func (s *InMemTranslateStore) RepairKeys(topo *Topology, verbose, applyKeyRepairs bool) (changed bool, err error)
- func (s *InMemTranslateStore) SetReadOnly(v bool)
- func (s *InMemTranslateStore) TranslateID(id uint64) (string, error)
- func (s *InMemTranslateStore) TranslateIDs(ids []uint64) ([]string, error)
- func (s *InMemTranslateStore) TranslateKey(key string, writable bool) (uint64, error)
- func (s *InMemTranslateStore) TranslateKeys(keys []string, writable bool) (_ []uint64, err error)
- func (s *InMemTranslateStore) WriteNotify() <-chan struct{}
- func (s *InMemTranslateStore) WriteTo(w io.Writer) (int64, error)
- type Index
- func (i *Index) AvailableShards(localOnly bool) *roaring.Bitmap
- func (i *Index) BeginTx(writable bool, shard uint64) (Tx, error)
- func (i *Index) Close() error
- func (i *Index) ColumnAttrStore() AttrStore
- func (idx *Index) ComputeTranslatorSummary(verbose, checkKeys, applyKeyRepairs bool, topo *Topology, nodeID string, ...) (ats *AllTranslatorSummary, err error)
- func (i *Index) CreateField(name string, opts ...FieldOption) (*Field, error)
- func (i *Index) CreateFieldIfNotExists(name string, opts ...FieldOption) (*Field, error)
- func (i *Index) CreatedAt() int64
- func (i *Index) DeleteField(name string) error
- func (idx *Index) Dump(label string)
- func (i *Index) Field(name string) *Field
- func (i *Index) Fields() []*Field
- func (i *Index) Holder() *Holder
- func (i *Index) Keys() bool
- func (i *Index) Name() string
- func (i *Index) NeedsSnapshot() bool
- func (i *Index) NewTx(txo Txo) Tx
- func (i *Index) Open() error
- func (i *Index) OpenWithTimestamp() error
- func (i *Index) Options() IndexOptions
- func (i *Index) Path() string
- func (i *Index) QualifiedName() string
- func (idx *Index) SliceOfShards(field, view, viewPath string) (sliceOfShards []uint64, err error)
- func (idx *Index) StringifiedRoaringKeys(hashOnly, showOps bool, o Txo) (r string)
- func (i *Index) TranslateStore(partitionID int) TranslateStore
- func (i *Index) TranslateStorePath(partitionID int) string
- func (idx *Index) Txf() *TxFactory
- func (idx *Index) WriteFragmentChecksums(w io.Writer, showBits, showOps bool, topo *Topology, verbose bool) (sum *IndexFragmentSummary)
- type IndexFragmentSummary
- type IndexInfo
- type IndexOptions
- type IndexStatus
- type IndexTranslateOffsetMap
- type IndexUsage
- type InspectRequest
- type InspectRequestParams
- type InspectResponse
- type InternalClient
- type InternalQueryClient
- type Jmphasher
- type KeyOrID
- type LineSorter
- type MemoryUsage
- type Message
- type MessageProcessingError
- type MultiReaderB
- type MultiTranslateEntryReader
- type Node
- type NodeEvent
- type NodeEventType
- type NodeStateMessage
- type NodeStatus
- type NodeUsage
- type Nodes
- func (a Nodes) Clone() []*Node
- func (a Nodes) Contains(n *Node) bool
- func (a Nodes) ContainsID(id string) bool
- func (a Nodes) Filter(n *Node) []*Node
- func (a Nodes) FilterID(id string) []*Node
- func (a Nodes) FilterURI(uri URI) []*Node
- func (a Nodes) IDs() []string
- func (a Nodes) NodeByID(id string) *Node
- func (a Nodes) URIs() []URI
- type NopGeneration
- type NotFoundError
- type OpenIDAllocatorFunc
- type OpenTransactionStoreFunc
- type OpenTranslateReaderFunc
- type OpenTranslateStoreFunc
- type Pair
- type PairField
- type Pairs
- type PairsField
- type PastQueryStatus
- type PreconditionFailedError
- type Qcx
- func (q *Qcx) Abort()
- func (qcx *Qcx) ClearRequiredForAtomicWriteTx()
- func (q *Qcx) Finish() (err error)
- func (qcx *Qcx) GetTx(o Txo) (tx Tx, finisher func(perr *error), err error)
- func (qcx *Qcx) ListOpenTx() string
- func (q *Qcx) Reset()
- func (qcx *Qcx) SetRequiredForAtomicWriteTx(tx Tx)
- func (qcx *Qcx) StartAtomicWriteTx(o Txo)
- type QueryRequest
- type QueryResponse
- type RBFTx
- func (tx *RBFTx) Add(index, field, view string, shard uint64, batched bool, a ...uint64) (changeCount int, err error)
- func (tx *RBFTx) ApplyFilter(index, field, view string, shard uint64, ckey uint64, ...) (err error)
- func (tx *RBFTx) Commit() (err error)
- func (tx *RBFTx) Container(index, field, view string, shard uint64, key uint64) (*roaring.Container, error)
- func (tx *RBFTx) ContainerIterator(index, field, view string, shard uint64, key uint64) (citer roaring.ContainerIterator, found bool, err error)
- func (tx *RBFTx) Contains(index, field, view string, shard uint64, v uint64) (exists bool, err error)
- func (tx *RBFTx) Count(index, field, view string, shard uint64) (uint64, error)
- func (tx *RBFTx) CountRange(index, field, view string, shard uint64, start, end uint64) (n uint64, err error)
- func (tx *RBFTx) DBPath() string
- func (tx *RBFTx) Dump(short bool, shard uint64)
- func (tx *RBFTx) ForEach(index, field, view string, shard uint64, fn func(i uint64) error) error
- func (tx *RBFTx) ForEachRange(index, field, view string, shard uint64, start, end uint64, ...) error
- func (tx *RBFTx) GetFieldSizeBytes(index, field string) (uint64, error)
- func (tx *RBFTx) GetSortedFieldViewList(idx *Index, shard uint64) (fvs []txkey.FieldView, err error)
- func (tx *RBFTx) Group() *TxGroup
- func (tx *RBFTx) ImportRoaringBits(index, field, view string, shard uint64, rit roaring.RoaringIterator, ...) (changed int, rowSet map[uint64]int, err error)
- func (tx *RBFTx) IncrementOpN(index, field, view string, shard uint64, changedN int)
- func (tx *RBFTx) IsDone() (done bool)
- func (tx *RBFTx) Max(index, field, view string, shard uint64) (uint64, error)
- func (tx *RBFTx) Min(index, field, view string, shard uint64) (uint64, bool, error)
- func (tx *RBFTx) NewTxIterator(index, field, view string, shard uint64) *roaring.Iterator
- func (tx *RBFTx) OffsetRange(index, field, view string, shard uint64, offset, start, end uint64) (*roaring.Bitmap, error)
- func (tx *RBFTx) Options() Txo
- func (tx *RBFTx) Pointer() string
- func (tx *RBFTx) PutContainer(index, field, view string, shard uint64, key uint64, c *roaring.Container) error
- func (tx *RBFTx) Readonly() bool
- func (tx *RBFTx) Remove(index, field, view string, shard uint64, a ...uint64) (changeCount int, err error)
- func (tx *RBFTx) RemoveContainer(index, field, view string, shard uint64, key uint64) error
- func (tx *RBFTx) RoaringBitmap(index, field, view string, shard uint64) (*roaring.Bitmap, error)
- func (tx *RBFTx) RoaringBitmapReader(index, field, view string, shard uint64, fragmentPathForRoaring string) (r io.ReadCloser, sz int64, err error)
- func (tx *RBFTx) Rollback()
- func (tx *RBFTx) Sn() int64
- func (tx *RBFTx) Type() string
- func (tx *RBFTx) UnionInPlace(index, field, view string, shard uint64, others ...*roaring.Bitmap) error
- func (tx *RBFTx) UseRowCache() bool
- type RawRoaringData
- type RbfDBWrapper
- func (w *RbfDBWrapper) CleanupTx(tx Tx)
- func (w *RbfDBWrapper) Close() error
- func (w *RbfDBWrapper) DeleteDBPath(dbs *DBShard) error
- func (w *RbfDBWrapper) DeleteField(index, field, fieldPath string) error
- func (w *RbfDBWrapper) DeleteFragment(index, field, view string, shard uint64, frag interface{}) error
- func (w *RbfDBWrapper) DeleteIndex(indexName string) error
- func (w *RbfDBWrapper) HasData() (has bool, err error)
- func (w *RbfDBWrapper) NewTx(write bool, initialIndex string, o Txo) (_ Tx, err error)
- func (w *RbfDBWrapper) OpenListString() (r string)
- func (w *RbfDBWrapper) OpenSnList() (slc []int64)
- func (w *RbfDBWrapper) Path() string
- func (w *RbfDBWrapper) SetHolder(h *Holder)
- type RecalculateCaches
- type ResizeInstruction
- type ResizeInstructionComplete
- type ResizeSource
- type RoaringTx
- func (tx *RoaringTx) Add(index, field, view string, shard uint64, batched bool, a ...uint64) (changeCount int, err error)
- func (c *RoaringTx) ApplyFilter(index, field, view string, shard uint64, ckey uint64, ...) (err error)
- func (tx *RoaringTx) Commit() error
- func (tx *RoaringTx) Container(index, field, view string, shard uint64, key uint64) (*roaring.Container, error)
- func (tx *RoaringTx) ContainerIterator(index, field, view string, shard uint64, key uint64) (citer roaring.ContainerIterator, found bool, err error)
- func (tx *RoaringTx) Contains(index, field, view string, shard uint64, v uint64) (exists bool, err error)
- func (tx *RoaringTx) Count(index, field, view string, shard uint64) (uint64, error)
- func (tx *RoaringTx) CountRange(index, field, view string, shard uint64, start, end uint64) (uint64, error)
- func (tx *RoaringTx) Dump(short bool, shard uint64)
- func (tx *RoaringTx) ForEach(index, field, view string, shard uint64, fn func(i uint64) error) error
- func (tx *RoaringTx) ForEachRange(index, field, view string, shard uint64, start, end uint64, ...) error
- func (tx *RoaringTx) GetFieldSizeBytes(index, field string) (uint64, error)
- func (tx *RoaringTx) GetSortedFieldViewList(idx *Index, shard uint64) (fvs []txkey.FieldView, err error)
- func (tx *RoaringTx) Group() *TxGroup
- func (tx *RoaringTx) ImportRoaringBits(index, field, view string, shard uint64, rit roaring.RoaringIterator, ...) (changed int, rowSet map[uint64]int, err error)
- func (tx *RoaringTx) IncrementOpN(index, field, view string, shard uint64, changedN int)
- func (tx *RoaringTx) IsDone() (done bool)
- func (tx *RoaringTx) Max(index, field, view string, shard uint64) (uint64, error)
- func (tx *RoaringTx) Min(index, field, view string, shard uint64) (uint64, bool, error)
- func (tx *RoaringTx) NewTxIterator(index, field, view string, shard uint64) *roaring.Iterator
- func (tx *RoaringTx) OffsetRange(index, field, view string, shard uint64, offset, start, end uint64) (*roaring.Bitmap, error)
- func (tx *RoaringTx) Options() Txo
- func (tx *RoaringTx) Pointer() string
- func (tx *RoaringTx) PutContainer(index, field, view string, shard uint64, key uint64, c *roaring.Container) error
- func (tx *RoaringTx) Readonly() bool
- func (tx *RoaringTx) Remove(index, field, view string, shard uint64, a ...uint64) (changeCount int, err error)
- func (tx *RoaringTx) RemoveContainer(index, field, view string, shard uint64, key uint64) error
- func (tx *RoaringTx) RoaringBitmap(index, field, view string, shard uint64) (*roaring.Bitmap, error)
- func (tx *RoaringTx) RoaringBitmapReader(index, field, view string, shard uint64, fragmentPathForRoaring string) (r io.ReadCloser, sz int64, err error)
- func (tx *RoaringTx) Rollback()
- func (tx *RoaringTx) Sn() int64
- func (tx *RoaringTx) Type() string
- func (tx *RoaringTx) UnionInPlace(index, field, view string, shard uint64, others ...*roaring.Bitmap) error
- func (tx *RoaringTx) UseRowCache() bool
- type RoaringWrapper
- func (w *RoaringWrapper) CleanupTx(tx Tx)
- func (w *RoaringWrapper) Close() (err error)
- func (w *RoaringWrapper) DeleteDBPath(dbs *DBShard) (err error)
- func (w *RoaringWrapper) DeleteField(index, field, fieldPath string) error
- func (w *RoaringWrapper) DeleteFragment(index, field, view string, shard uint64, frag interface{}) error
- func (w *RoaringWrapper) HasData() (has bool, err error)
- func (w *RoaringWrapper) IsClosed() (closed bool)
- func (w *RoaringWrapper) NewTx(write bool, initialIndexName string, o Txo) (tx Tx, err error)
- func (w *RoaringWrapper) OpenListString() (r string)
- func (w *RoaringWrapper) OpenSnList() (slc []int64)
- func (w *RoaringWrapper) Path() string
- func (w *RoaringWrapper) SetHolder(h *Holder)
- type Row
- func (r *Row) Any() bool
- func (r *Row) Clone() (clone *Row)
- func (r *Row) Columns() []uint64
- func (r *Row) Count() uint64
- func (r *Row) Difference(others ...*Row) *Row
- func (r *Row) Freeze()
- func (r *Row) Includes(col uint64) bool
- func (r *Row) Intersect(other *Row) *Row
- func (r *Row) IsEmpty() bool
- func (r *Row) MarshalJSON() ([]byte, error)
- func (r *Row) Merge(other *Row)
- func (r *Row) Roaring() []byte
- func (r *Row) Segments() []rowSegment
- func (r *Row) SetBit(i uint64) (changed bool)
- func (r *Row) Shift(n int64) (*Row, error)
- func (r *Row) ToRows(callback func(*pb.RowResponse) error) error
- func (r *Row) ToTable() (*pb.TableResponse, error)
- func (r *Row) Union(others ...*Row) *Row
- func (r *Row) Xor(other *Row) *Row
- type RowIDs
- type RowIdentifiers
- type Schema
- type Serializer
- type Server
- func (s *Server) Close() error
- func (srv *Server) FinishTransaction(ctx context.Context, id string, remote bool) (*Transaction, error)
- func (s *Server) GRPCURI() URI
- func (srv *Server) GetTransaction(ctx context.Context, id string, remote bool) (*Transaction, error)
- func (s *Server) Holder() *Holder
- func (s *Server) InternalClient() InternalClient
- func (s *Server) NodeID() string
- func (s *Server) Open() error
- func (s *Server) SendAsync(m Message) error
- func (s *Server) SendSync(m Message) error
- func (s *Server) SendTo(to *Node, m Message) error
- func (srv *Server) StartTransaction(ctx context.Context, id string, timeout time.Duration, exclusive bool, ...) (*Transaction, error)
- func (s *Server) SyncData() error
- func (srv *Server) Transactions(ctx context.Context) (map[string]*Transaction, error)
- func (s *Server) UpAndDown() error
- type ServerOption
- func OptServerAntiEntropyInterval(interval time.Duration) ServerOption
- func OptServerAttrStoreFunc(af func(string) AttrStore) ServerOption
- func OptServerClusterDisabled(disabled bool, hosts []string) ServerOption
- func OptServerClusterHasher(h Hasher) ServerOption
- func OptServerClusterName(name string) ServerOption
- func OptServerDataDir(dir string) ServerOption
- func OptServerDiagnosticsInterval(dur time.Duration) ServerOption
- func OptServerExecutorPoolSize(size int) ServerOption
- func OptServerGCNotifier(gcn GCNotifier) ServerOption
- func OptServerGRPCURI(uri *URI) ServerOption
- func OptServerInternalClient(c InternalClient) ServerOption
- func OptServerIsCoordinator(is bool) ServerOption
- func OptServerLogger(l logger.Logger) ServerOption
- func OptServerLongQueryTime(dur time.Duration) ServerOption
- func OptServerMaxWritesPerRequest(n int) ServerOption
- func OptServerMetricInterval(dur time.Duration) ServerOption
- func OptServerNodeDownRetries(retries int, sleep time.Duration) ServerOption
- func OptServerNodeID(nodeID string) ServerOption
- func OptServerOpenIDAllocator(fn OpenIDAllocatorFunc) ServerOption
- func OptServerOpenTranslateReader(fn OpenTranslateReaderFunc) ServerOption
- func OptServerOpenTranslateStore(fn OpenTranslateStoreFunc) ServerOption
- func OptServerPrimaryTranslateStore(store TranslateStore) ServerOption
- func OptServerQueryHistoryLength(length int) ServerOption
- func OptServerRBFConfig(cfg *rbfcfg.Config) ServerOption
- func OptServerReplicaN(n int) ServerOption
- func OptServerRowcacheOn(rowcacheOn bool) ServerOption
- func OptServerSerializer(ser Serializer) ServerOption
- func OptServerStatsClient(sc stats.StatsClient) ServerOption
- func OptServerSystemInfo(si SystemInfo) ServerOption
- func OptServerTxsrc(txsrc string) ServerOption
- func OptServerURI(uri *URI) ServerOption
- type SetCoordinatorMessage
- type SignedRow
- type SnapshotQueue
- type SortByTot
- type SystemInfo
- type TimeQuantum
- type Topology
- func (t *Topology) ContainsID(id string) bool
- func (t *Topology) GetNodeIDs() []string
- func (topo *Topology) GetNonPrimaryReplicas(partitionID int) (nonPrimaryReplicas []string)
- func (topo *Topology) GetPrimaryForColKeyTranslation(index, key string) (primary int)
- func (t *Topology) GetPrimaryForShardReplication(index string, shard uint64) int
- func (topo *Topology) GetReplicasForPrimary(primary int) (replicaNodeIDs, nonReplicas map[string]bool)
- func (topo *Topology) IsPrimary(nodeID string, partitionID int) bool
- func (topo *Topology) KeyPartition(index, key string) int
- func (topo *Topology) PrimaryNodeIndex(partitionID int) (nodeIndex int)
- func (t *Topology) String() string
- type Transaction
- type TransactionManager
- func (tm *TransactionManager) Finish(ctx context.Context, id string) (*Transaction, error)
- func (tm *TransactionManager) Get(ctx context.Context, id string) (*Transaction, error)
- func (tm *TransactionManager) List(ctx context.Context) (map[string]*Transaction, error)
- func (tm *TransactionManager) ResetDeadline(ctx context.Context, id string) (*Transaction, error)
- func (tm *TransactionManager) Start(ctx context.Context, id string, timeout time.Duration, exclusive bool) (*Transaction, error)
- type TransactionMessage
- type TransactionStats
- type TransactionStore
- type TranslateEntry
- type TranslateEntryReader
- type TranslateIDsRequest
- type TranslateIDsResponse
- type TranslateKeysRequest
- type TranslateKeysResponse
- type TranslateOffsetMap
- func (m TranslateOffsetMap) FieldOffset(index, name string) uint64
- func (m TranslateOffsetMap) IndexPartitionOffset(name string, partitionID int) uint64
- func (m TranslateOffsetMap) SetFieldOffset(index, name string, offset uint64)
- func (m TranslateOffsetMap) SetIndexPartitionOffset(name string, partitionID int, offset uint64)
- type TranslateStore
- type TranslationResizeSource
- type TranslationSyncer
- type TranslatorSummary
- type Tx
- type TxBitmap
- type TxFactory
- func (f *TxFactory) Close() (err error)
- func (f *TxFactory) CloseIndex(idx *Index) error
- func (f *TxFactory) DeleteFieldFromStore(index, field, fieldPath string) (err error)
- func (f *TxFactory) DeleteFragmentFromStore(index, field, view string, shard uint64, frag *fragment) (err error)
- func (f *TxFactory) DeleteIndex(name string) (err error)
- func (f *TxFactory) DumpAll()
- func (f *TxFactory) GetDBShardPath(index string, shard uint64, idx *Index, ty txtype, write bool) (shardPath string, err error)
- func (txf *TxFactory) GetFieldView2ShardsMapForIndex(idx *Index) (vs *FieldView2Shards, err error)
- func (f *TxFactory) GetShardsForIndex(idx *Index, roaringViewPath string, requireData bool) (map[uint64]bool, error)
- func (f *TxFactory) IndexUsageDetails() (map[string]IndexUsage, uint64, error)
- func (txf *TxFactory) IsTxDatabasePath(path string) bool
- func (txf *TxFactory) NeedsSnapshot() (b bool)
- func (txf *TxFactory) NewDBPerShard(types []txtype, holderDir string, holder *Holder) (d *DBPerShard)
- func (f *TxFactory) NewQcx() (qcx *Qcx)
- func (f *TxFactory) NewTx(o Txo) (txn Tx)
- func (f *TxFactory) NewTxGroup() (g *TxGroup)
- func (f *TxFactory) Open() error
- func (f *TxFactory) TxType() string
- func (f *TxFactory) TxTypes() []txtype
- func (f *TxFactory) Types() []txtype
- func (f *TxFactory) UseRowCache() bool
- type TxGroup
- type TxStore
- type Txo
- type URI
- func (u *URI) HostPort() string
- func (u *URI) MarshalJSON() ([]byte, error)
- func (u *URI) Path(path string) string
- func (u *URI) Set(value string) error
- func (u *URI) SetPort(port uint16)
- func (u URI) String() string
- func (u URI) Type() string
- func (u *URI) URL() url.URL
- func (u *URI) UnmarshalJSON(b []byte) error
- type URIs
- type UpdateCoordinatorMessage
- type ValCount
- type ViewInfo
Constants ¶
const (
    // DefaultPartitionN is the default number of partitions in a cluster.
    DefaultPartitionN = 256

    // ClusterState represents the state returned in the /status endpoint.
    ClusterStateStarting = "STARTING"
    ClusterStateDegraded = "DEGRADED" // cluster is running but we've lost some # of hosts >0 but < replicaN
    ClusterStateNormal   = "NORMAL"
    ClusterStateResizing = "RESIZING"
)
const (
    TRANSACTION_START    = "start"
    TRANSACTION_FINISH   = "finish"
    TRANSACTION_VALIDATE = "validate"
)
Transaction Actions
const (
    DefaultFieldType = FieldTypeSet
    DefaultCacheType = CacheTypeRanked

    // Default ranked field cache
    DefaultCacheSize = 50000
)
Default field settings.
const (
    FieldTypeSet     = "set"
    FieldTypeInt     = "int"
    FieldTypeTime    = "time"
    FieldTypeMutex   = "mutex"
    FieldTypeBool    = "bool"
    FieldTypeDecimal = "decimal"
)
Field types.
const (
    CacheTypeLRU    = "lru"
    CacheTypeRanked = "ranked"
    CacheTypeNone   = "none"
)
Cache types.
const (
    // ShardWidth is the number of column IDs in a shard. It must be a power of 2 greater than or equal to 16.
    // shardWidthExponent = 20 // set in shardwidthNN.go files
    ShardWidth = 1 << shardwidth.Exponent

    // HashBlockSize is the number of rows in a merkle hash block.
    HashBlockSize = 100
)
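As an illustration of the definition above (a sketch, not code from this package): because ShardWidth is the number of column IDs per shard, the shard holding a given column ID can be computed by integer division. The helper name and the github.com/pilosa/pilosa/v2 import path are assumptions.

package main

import (
    "fmt"

    pilosa "github.com/pilosa/pilosa/v2"
)

// shardForColumn is a hypothetical helper: it maps a column ID to the
// shard that contains it, using the ShardWidth constant defined above.
func shardForColumn(columnID uint64) uint64 {
    return columnID / pilosa.ShardWidth
}

func main() {
    // Column IDs in [0, ShardWidth) land in shard 0, and so on.
    fmt.Println(shardForColumn(0), shardForColumn(3*pilosa.ShardWidth+42)) // 0 3
}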
const (
    RequestActionSet       = "set"
    RequestActionClear     = "clear"
    RequestActionOverwrite = "overwrite"
)
const (
    MetricCreateIndex = "create_index_total"
    MetricDeleteIndex = "delete_index_total"
    MetricCreateField = "create_field_total"
    MetricDeleteField = "delete_field_total"
    MetricDeleteAvailableShard = "delete_available_shard_total"
    MetricRecalculateCache = "recalculate_cache_total"
    MetricInvalidateCache = "invalidate_cache_total"
    MetricInvalidateCacheSkipped = "invalidate_cache_skipped_total"
    MetricReadDirtyCache = "dirty_cache_total"
    MetricRankCacheLength = "rank_cache_length"
    MetricCacheThresholdReached = "cache_threshold_reached_total"
    MetricRow = "query_row_total"
    MetricRowBSI = "query_row_bsi_total"
    MetricSetRowAttrs = "query_setrowattrs_total"
    MetricSetColumnAttrs = "query_setcolumnattrs_total"
    MetricSetBit = "set_bit_total"
    MetricClearBit = "clear_bit_total"
    MetricImportingN = "importing_total"
    MetricImportedN = "imported_total"
    MetricClearingN = "clearing_total"
    MetricClearedN = "cleared_total"
    MetricSnapshotDurationSeconds = "snapshot_duration_seconds"
    MetricBlockRepair = "block_repair_total"
    MetricSyncFieldDurationSeconds = "sync_field_duration_seconds"
    MetricSyncIndexDurationSeconds = "sync_index_duration_seconds"
    MetricColumnAttrStoreBlocks = "column_attr_store_blocks_total"
    MetricColumnAttrDiff = "column_attr_diff_total"
    MetricRowAttrStoreBlocks = "row_attr_store_blocks_total"
    MetricRowAttrDiff = "row_attr_diff_total"
    MetricHTTPRequest = "http_request_duration_seconds"
    MetricGRPCUnaryQueryDurationSeconds = "grpc_request_pql_unary_query_duration_seconds"
    MetricGRPCUnaryFormatDurationSeconds = "grpc_request_pql_unary_format_duration_seconds"
    MetricGRPCStreamQueryDurationSeconds = "grpc_request_pql_stream_query_duration_seconds"
    MetricGRPCStreamFormatDurationSeconds = "grpc_request_pql_stream_format_duration_seconds"
    MetricMaxShard = "maximum_shard"
    MetricAntiEntropy = "antientropy_total"
    MetricAntiEntropyDurationSeconds = "antientropy_duration_seconds"
    MetricGarbageCollection = "garbage_collection_total"
    MetricGoroutines = "goroutines"
    MetricOpenFiles = "open_files"
    MetricHeapAlloc = "heap_alloc"
    MetricHeapInuse = "heap_inuse"
    MetricStackInuse = "stack_inuse"
    MetricMallocs = "mallocs"
    MetricFrees = "frees"
    MetricTransactionStart = "transaction_start"
    MetricTransactionEnd = "transaction_end"
    MetricTransactionBlocked = "transaction_blocked"
    MetricExclusiveTransactionRequest = "transaction_exclusive_request"
    MetricExclusiveTransactionActive = "transaction_exclusive_active"
    MetricExclusiveTransactionEnd = "transaction_exclusive_end"
    MetricExclusiveTransactionBlocked = "transaction_exclusive_blocked"
    MetricPqlQueries = "pql_queries_total"
    MetricSqlQueries = "sql_queries_total"
)
const (
    RoaringTxn string = "roaring"
    RBFTxn     string = "rbf"
    BoltTxn    string = "bolt"
)
public strings that pilosa/server/config.go can reference
const DefaultTxsrc = RoaringTxn
DefaultTxsrc is set here. pilosa/server/config.go references it to set the default for the pilosa server executable. It can be overridden with the env variable PILOSA_TXSRC for testing.
const DetectMemAccessPastTx = false
DetectMemAccessPastTx, when true, helps us catch places in the api and executor where mmapped memory is accessed after the transaction has committed or rolled back. Since memory segments will be recycled by the underlying databases, this can lead to corruption. When DetectMemAccessPastTx is true, code in bolt.go copies the transactionally viewed memory before returning it for bitmap reading, and then zeroes it or overwrites it with -2 when the Tx completes.
Should be false for production.
const ErrTransactionExclusive = Error("there is an exclusive transaction, try later")
const ErrTransactionExists = Error("transaction with the given id already exists")
const ErrTransactionNotFound = Error("transaction not found")
const FragmentNotFound = Error("fragment not found")
const LeftShifted16MaxContainerKey = uint64(0xffffffffffff0000) // or math.MaxUint64 - (1<<16 - 1), or 18446744073709486080
LeftShifted16MaxContainerKey is 0xffffffffffff0000. It is the roaring.maxContainerKey 0x0000ffffffffffff shifted 16 bits to the left, so its domain is the full [0, 2^64) bit space. It matches the semantics of the roaring.OffsetRange() API and is the maximum endx value for Tx.OffsetRange(), because the low bits, as in roaring.OffsetRange(), are not allowed to be set. Tx.RoaringBitmap() uses it to obtain the full contents of a fragment from tx.OffsetRange() by requesting [0, LeftShifted16MaxContainerKey) with an offset of 0.
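A minimal sketch of the usage described above. It assumes the Tx interface exposes OffsetRange with the signature shown in the index for its concrete implementations, that tx is an already-open transaction obtained elsewhere, and the github.com/pilosa/pilosa/v2 import path.

package example

import (
    "fmt"

    pilosa "github.com/pilosa/pilosa/v2"
)

// WholeFragment is a hypothetical helper: it reads the full contents of one
// fragment through an already-open transaction, as described above.
func WholeFragment(tx pilosa.Tx, index, field, view string, shard uint64) error {
    // offset=0, start=0, endx=LeftShifted16MaxContainerKey covers the
    // fragment's entire [0, 2^64) bit space.
    bm, err := tx.OffsetRange(index, field, view, shard, 0, 0, pilosa.LeftShifted16MaxContainerKey)
    if err != nil {
        return err
    }
    fmt.Println("bits set:", bm.Count())
    return nil
}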
const RFC3339MsecTz0 = "2006-01-02T15:04:05.000Z07:00"
const RFC3339UsecTz0 = "2006-01-02T15:04:05.000000Z07:00"
const TimeFormat = "2006-01-02T15:04"
TimeFormat is the go-style time format used to parse string dates.
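For example, a date string in this layout can be parsed with the standard library (a sketch; the sample date and the github.com/pilosa/pilosa/v2 import path are assumptions):

package main

import (
    "fmt"
    "time"

    pilosa "github.com/pilosa/pilosa/v2"
)

func main() {
    // Parse an arbitrary sample timestamp using the package's TimeFormat layout.
    t, err := time.Parse(pilosa.TimeFormat, "2019-08-31T13:45")
    if err != nil {
        panic(err)
    }
    fmt.Println(t.UTC()) // 2019-08-31 13:45:00 +0000 UTC
}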
const TxInitialMmapSize = 4 << 30 // 4GB
const ViewNotFound = Error("view not found")
Variables ¶
var (
    ErrHostRequired         = errors.New("host required")
    ErrIndexRequired        = errors.New("index required")
    ErrIndexExists          = errors.New("index already exists")
    ErrIndexNotFound        = errors.New("index not found")
    ErrForeignIndexNotFound = errors.New("foreign index not found")

    // ErrFieldRequired is returned when no field is specified.
    ErrFieldRequired  = errors.New("field required")
    ErrColumnRequired = errors.New("column required")
    ErrFieldExists    = errors.New("field already exists")
    ErrFieldNotFound  = errors.New("field not found")

    ErrBSIGroupNotFound         = errors.New("bsigroup not found")
    ErrBSIGroupExists           = errors.New("bsigroup already exists")
    ErrBSIGroupNameRequired     = errors.New("bsigroup name required")
    ErrInvalidBSIGroupType      = errors.New("invalid bsigroup type")
    ErrInvalidBSIGroupRange     = errors.New("invalid bsigroup range")
    ErrInvalidBSIGroupValueType = errors.New("invalid bsigroup value type")
    ErrBSIGroupValueTooLow      = errors.New("bsigroup value too low")
    ErrBSIGroupValueTooHigh     = errors.New("bsigroup value too high")
    ErrInvalidRangeOperation    = errors.New("invalid range operation")
    ErrInvalidBetweenValue      = errors.New("invalid value for between operation")
    ErrDecimalOutOfRange        = errors.New("decimal value out of range")

    ErrInvalidView      = errors.New("invalid view")
    ErrInvalidCacheType = errors.New("invalid cache type")

    ErrName = errors.New("invalid index or field name, must match [a-z][a-z0-9_-]* and contain at most 230 characters")

    // ErrFragmentNotFound is returned when a fragment does not exist.
    ErrFragmentNotFound = errors.New("fragment not found")
    ErrQueryRequired    = errors.New("query required")
    ErrQueryCancelled   = errors.New("query cancelled")
    ErrQueryTimeout     = errors.New("query timeout")
    ErrTooManyWrites    = errors.New("too many write commands")

    // TODO(2.0) poorly named - used when a *node* doesn't own a shard. Probably
    // we won't need this error at all by 2.0 though.
    ErrClusterDoesNotOwnShard = errors.New("node does not own shard")

    // ErrPreconditionFailed is returned when specified index/field createdAt timestamps don't match
    ErrPreconditionFailed = errors.New("precondition failed")

    ErrNodeIDNotExists    = errors.New("node with provided ID does not exist")
    ErrNodeNotCoordinator = errors.New("node is not the coordinator")
    ErrResizeNotRunning   = errors.New("no resize job currently running")
    ErrResizeNoReplicas   = errors.New("not enough data to perform resize (replica factor may need to be increased)")

    ErrNotImplemented            = errors.New("not implemented")
    ErrFieldsArgumentRequired    = errors.New("fields argument required")
    ErrExpectedFieldListArgument = errors.New("expected field list argument")
    ErrIntFieldWithKeys          = errors.New("int field cannot be created with 'keys=true' option")
    ErrDecimalFieldWithKeys      = errors.New("decimal field cannot be created with 'keys=true' option")
)
System errors.
var (
    ErrTranslateStoreClosed       = errors.New("translate store closed")
    ErrTranslateStoreReaderClosed = errors.New("translate store reader closed")
    ErrReplicationNotSupported    = errors.New("replication not supported")
    ErrTranslateStoreReadOnly     = errors.New("translate store could not find or create key, translate store read only")
    ErrTranslateStoreNotFound     = errors.New("translate store not found")
    ErrTranslatingKeyNotFound     = errors.New("translating key not found")
    ErrCannotOpenV1TranslateFile  = errors.New("cannot open v1 translate .keys file")
)
Translate store errors.
var BuildTime string
var Commit string
var ErrAborted = fmt.Errorf("error: update was aborted")
var ErrInvalidTimeQuantum = errors.New("invalid time quantum")
ErrInvalidTimeQuantum is returned when parsing a time quantum.
var ErrNoData = fmt.Errorf("no data")
var ErrQcxDone = fmt.Errorf("Qcx already Aborted or Finished, so must call reset before re-use")
var GoVersion string = runtime.Version()
var LatticeCommit string
var NaN = math.NaN()
var NewAuditor func() testhook.Auditor = NewNopAuditor
var NoopFinisher = func(perr *error) {}
var NopBroadcaster broadcaster = &nopBroadcaster{}
NopBroadcaster represents a Broadcaster that doesn't do anything.
var OurStdout io.Writer = os.Stdout
so we can multi write easily, use our own printf
var TrialDeadline string
var Variant string
var VerboseVerbose bool = false
for tons of debug output
var Version string
Functions ¶
func AlwaysPrintf ¶ added in v2.2.0
func AlwaysPrintf(format string, a ...interface{})
func BitmapAsString ¶ added in v2.2.0
func CPUProfileForDur ¶ added in v2.2.0
func CompareTransactions ¶
func CompareTransactions(t1, t2 *Transaction) error
func DecodeAttrs ¶
DecodeAttrs decodes a byte slice into an attribute map.
func DiskUse ¶ added in v2.2.0
DiskUse reports the total bytes used by all files under root that match requiredSuffix. requiredSuffix can be the empty string. Space used by directories is not counted.
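A minimal usage sketch, assuming the github.com/pilosa/pilosa/v2 import path; the directory and suffix are arbitrary and must exist for the call to succeed.

package main

import (
    "fmt"
    "log"

    pilosa "github.com/pilosa/pilosa/v2"
)

func main() {
    // Sum the sizes of all files ending in ".roaring" under an (arbitrary) data directory.
    tot, err := pilosa.DiskUse("/tmp/pilosa-data", ".roaring")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%d bytes in matching files\n", tot)
}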
func DumpAllBolt ¶ added in v2.3.0
func DumpAllBolt()
func EncodeAttrs ¶
EncodeAttrs encodes an attribute map into a byte slice.
func FileExists ¶ added in v2.2.0
func FormatQualifiedFieldName ¶ added in v2.2.0
FormatQualifiedFieldName generates a qualified name for the field to be used with Tx operations.
func FormatQualifiedFragmentName ¶ added in v2.2.0
FormatQualifiedFragmentName generates a qualified name for the fragment to be used with Tx operations.
func FormatQualifiedIndexName ¶ added in v2.2.0
FormatQualifiedIndexName generates a qualified name for the index to be used with Tx operations.
func FormatQualifiedViewName ¶ added in v2.2.0
FormatQualifiedViewName generates a qualified name for the view to be used with Tx operations.
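A sketch showing the round trip between formatting and parsing a qualified fragment name; the index, field, view, and shard values are arbitrary, and the github.com/pilosa/pilosa/v2 import path is an assumption.

package main

import (
    "fmt"
    "log"

    pilosa "github.com/pilosa/pilosa/v2"
)

func main() {
    // Build a qualified fragment name from its parts...
    name := pilosa.FormatQualifiedFragmentName("myindex", "myfield", "standard", 3)

    // ...and recover the parts from the qualified name.
    index, field, view, shard, err := pilosa.ParseQualifiedFragmentName(name)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(index, field, view, shard) // myindex myfield standard 3
}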
func GenerateNextPartitionedID ¶
GenerateNextPartitionedID returns the next ID within the same partition.
func GenericApplyFilter ¶ added in v2.7.0
func GenericApplyFilter(tx Tx, index, field, view string, shard uint64, ckey uint64, filter roaring.BitmapFilter) (err error)
GenericApplyFilter implements ApplyFilter in terms of tx.ContainerIterator, as a convenience if a Tx backend hasn't implemented this new function yet.
func GetAvailPort ¶ added in v2.2.0
func GetAvailPort() int
GetAvailPort asks the OS for an unused port. There's a race here, where the port could be grabbed by someone else before the caller gets to Listen on it, but we are only using it to find a random port for test hang debugging. Moreover, in practice such races are rare; just ask for it again if the port is taken. Uses net.Listen("tcp", ":0") to determine a free port, then releases it back to the OS with Listener.Close().
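A sketch of typical test usage: grab a free port and listen on it, falling back if someone else wins the race described above. The import path is an assumption.

package main

import (
    "fmt"
    "net"

    pilosa "github.com/pilosa/pilosa/v2"
)

func main() {
    // Ask the OS for a currently unused port; there is a small race window
    // before we Listen, so fall back to ":0" if the port is already taken.
    port := pilosa.GetAvailPort()
    ln, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
    if err != nil {
        ln, err = net.Listen("tcp", ":0")
        if err != nil {
            panic(err)
        }
    }
    defer ln.Close()
    fmt.Println("listening on", ln.Addr())
}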
func LatticeVersionInfo ¶ added in v2.2.0
func LatticeVersionInfo() string
func MarshalInternalMessage ¶
func MarshalInternalMessage(m Message, s Serializer) ([]byte, error)
MarshalInternalMessage serializes the pilosa message and adds pilosa internal type info, which is used by internal messaging.
func MemProfileForDur ¶ added in v2.2.0
func MustTxsrcToTxtype ¶ added in v2.2.0
func MustTxsrcToTxtype(txsrc string) (types []txtype)
func NewBlueGreenIterator ¶ added in v2.2.0
func NewBlueGreenIterator(tx *blueGreenTx, ait, bit roaring.ContainerIterator) *blueGreenIterator
func NewNopAuditor ¶ added in v2.2.0
func NewRankCache ¶
func NewRankCache(maxEntries uint32) *rankCache
NewRankCache returns a new instance of RankCache.
func NilInside ¶ added in v2.2.0
func NilInside(iface interface{}) bool
NilInside checks if the provided iface is nil or contains a nil pointer, slice, array, map, or channel.
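A sketch illustrating the typed-nil case this helper exists for: an interface holding a nil *bytes.Buffer is not == nil, but NilInside reports it as effectively nil. The sample type and the github.com/pilosa/pilosa/v2 import path are assumptions.

package main

import (
    "bytes"
    "fmt"

    pilosa "github.com/pilosa/pilosa/v2"
)

func main() {
    var buf *bytes.Buffer       // typed nil pointer
    var iface interface{} = buf // interface with a non-nil type and a nil value

    fmt.Println(iface == nil)            // false: the interface itself is not nil
    fmt.Println(pilosa.NilInside(iface)) // true: it contains a nil pointer
}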
func OpenIDAllocator ¶ added in v2.5.0
func OptAPIImportWorkerPoolSize ¶
func OptAPIImportWorkerPoolSize(size int) apiOption
func OptAPIServer ¶
func OptAPIServer(s *Server) apiOption
func ParseQualifiedFragmentName ¶ added in v2.2.0
ParseQualifiedFragmentName parses a qualified name into its parts.
func Printf ¶ added in v2.2.0
Printf formats according to a format specifier and writes to standard output. It returns the number of bytes written and any write error encountered.
func RoaringFragmentChecksum ¶ added in v2.2.0
func SubdirLargestDirWithSuffix ¶ added in v2.2.0
func SubdirLargestDirWithSuffix(rootDir, requiredDirSuffix string) (exists bool, largestSize int, err error)
rootDir must exist. SubdirLargestDirWithSuffix returns the size in bytes of the largest sub-directory that has the required suffix. The largestSize is from DiskUse() called on the sub-directory; DiskUse only counts file sizes, nothing for directory inodes.
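A usage sketch under the same assumptions as the DiskUse example above; the root directory and suffix are arbitrary.

package main

import (
    "fmt"
    "log"

    pilosa "github.com/pilosa/pilosa/v2"
)

func main() {
    // Find the largest sub-directory of an (arbitrary) root whose name ends in "-backup".
    exists, largest, err := pilosa.SubdirLargestDirWithSuffix("/tmp/pilosa-data", "-backup")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(exists, largest)
}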
func VersionInfo ¶
func VersionInfo() string
Types ¶
type API ¶
type API struct { Serializer Serializer // contains filtered or unexported fields }
API provides the top level programmatic interface to Pilosa. It is usually wrapped by a handler which provides an external interface (e.g. HTTP).
func (*API) ActiveQueries ¶
func (api *API) ActiveQueries(ctx context.Context) ([]ActiveQueryStatus, error)
func (*API) ApplySchema ¶
ApplySchema takes the given schema and applies it across the cluster (if remote is false), or just to this node (if remote is true). This is designed for the use case of replicating a schema from one Pilosa cluster to another which is initially empty. It is not officially supported in other scenarios and may produce surprising results.
func (*API) AvailableShardsByIndex ¶
AvailableShardsByIndex returns bitmaps of available shards, keyed by index name.
func (*API) ClusterMessage ¶
ClusterMessage is for internal use. It decodes a protobuf message out of the body and forwards it to the BroadcastHandler.
func (*API) ClusterName ¶ added in v2.3.0
ClusterName returns the cluster name.
func (*API) CommitIDs ¶ added in v2.5.0
func (api *API) CommitIDs(key IDAllocKey, session [32]byte, count uint64) error
func (*API) CreateField ¶
func (api *API) CreateField(ctx context.Context, indexName string, fieldName string, opts ...FieldOption) (*Field, error)
CreateField makes the named field in the named index with the given options. This method currently only takes a single functional option, but that may be changed in the future to support multiple options.
func (*API) CreateFieldKeys ¶ added in v2.1.8
func (api *API) CreateFieldKeys(ctx context.Context, index, field string, keys ...string) (map[string]uint64, error)
CreateFieldKeys looks up keys in a field, mapping them to IDs. If a key does not exist, it will be created.
func (*API) CreateIndex ¶
func (api *API) CreateIndex(ctx context.Context, indexName string, options IndexOptions) (*Index, error)
CreateIndex makes a new Pilosa index.
func (*API) CreateIndexKeys ¶ added in v2.1.8
func (api *API) CreateIndexKeys(ctx context.Context, index string, keys ...string) (map[string]uint64, error)
CreateIndexKeys looks up column keys in the index, mapping them to IDs. If a key does not exist, it will be created.
func (*API) DeleteAvailableShard ¶
func (api *API) DeleteAvailableShard(_ context.Context, indexName, fieldName string, shardID uint64) error
DeleteAvailableShard removes a shard ID from the available shard set cache.
func (*API) DeleteField ¶
DeleteField removes the named field from the named index. If the index is not found, an error is returned. If the field is not found, it is ignored and no action is taken.
func (*API) DeleteIndex ¶
DeleteIndex removes the named index. If the index is not found it does nothing and returns no error.
func (*API) DeleteView ¶
func (api *API) DeleteView(ctx context.Context, indexName string, fieldName string, viewName string) error
DeleteView removes the given view.
func (*API) ExportCSV ¶
func (api *API) ExportCSV(ctx context.Context, indexName string, fieldName string, shard uint64, w io.Writer) error
ExportCSV encodes the fragment designated by the index,field,shard as CSV of the form <row>,<col>
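A hedged sketch of calling ExportCSV (the index, field, and shard values are hypothetical; a *API is assumed to be available):

func exportShard(ctx context.Context, api *API, w io.Writer) error {
	// Write each set bit of shard 0 in "myindex"/"myfield" as a "<row>,<col>" line.
	return api.ExportCSV(ctx, "myindex", "myfield", 0, w)
}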
func (*API) FieldAttrDiff ¶
func (api *API) FieldAttrDiff(ctx context.Context, indexName string, fieldName string, blocks []AttrBlock) (map[uint64]map[string]interface{}, error)
FieldAttrDiff determines the local row attribute data blocks which differ from those provided.
func (*API) FindFieldKeys ¶ added in v2.1.8
func (api *API) FindFieldKeys(ctx context.Context, index, field string, keys ...string) (map[string]uint64, error)
FindFieldKeys looks up keys in a field, mapping them to IDs. If a key does not exist, it will be absent from the resulting map.
func (*API) FindIndexKeys ¶ added in v2.1.8
func (api *API) FindIndexKeys(ctx context.Context, index string, keys ...string) (map[string]uint64, error)
FindIndexKeys looks up column keys in the index, mapping them to IDs. If a key does not exist, it will be absent from the resulting map.
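The contrast between the Create* and Find* key methods can be sketched as follows (the index and key names are hypothetical, and error handling is elided):

// CreateIndexKeys assigns IDs to keys that do not yet exist.
created, _ := api.CreateIndexKeys(ctx, "myindex", "user-a", "user-b")
// FindIndexKeys only reports keys that already have IDs; unknown keys are
// simply missing from the result rather than being created.
found, _ := api.FindIndexKeys(ctx, "myindex", "user-a", "does-not-exist")
fmt.Println(len(created), len(found)) // e.g. 2 and 1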
func (*API) FinishTransaction ¶
func (*API) FragmentBlockData ¶
FragmentBlockData is an endpoint for internal usage. It is not guaranteed to return anything useful. Currently it returns protobuf encoded row and column ids from a "block" which is a subdivision of a fragment.
func (*API) FragmentBlocks ¶
func (api *API) FragmentBlocks(ctx context.Context, indexName, fieldName, viewName string, shard uint64) ([]FragmentBlock, error)
FragmentBlocks returns the checksums and block ids for all blocks in the specified fragment.
func (*API) FragmentData ¶
func (api *API) FragmentData(ctx context.Context, indexName, fieldName, viewName string, shard uint64) (io.WriterTo, error)
FragmentData returns all data in the specified fragment.
func (*API) GetTransaction ¶
func (*API) GetTranslateEntryReader ¶
func (api *API) GetTranslateEntryReader(ctx context.Context, offsets TranslateOffsetMap) (_ TranslateEntryReader, err error)
GetTranslateEntryReader provides an entry reader for key translation logs starting at offset.
func (*API) HostStates ¶ added in v2.3.0
func (*API) Hosts ¶
Hosts returns a list of the hosts in the cluster including their ID, URL, and which is the coordinator.
func (*API) Import ¶
func (api *API) Import(ctx context.Context, qcx *Qcx, req *ImportRequest, opts ...ImportOption) (err error)
Import allows a nil qcx so that a large body of existing tests does not need to be rewritten to be transaction-aware. It is convenient for some tests, particularly those in loops, to pass a nil qcx and treat the Import as having been committed when the call returns without error; when qcx is nil, the import is committed before returning.
func (*API) ImportAtomicRecord ¶ added in v2.2.0
func (api *API) ImportAtomicRecord(ctx context.Context, qcx *Qcx, req *AtomicRecord, opts ...ImportOption) error
func (*API) ImportColumnAttrs ¶
func (api *API) ImportColumnAttrs(ctx context.Context, req *ImportColumnAttrsRequest, opts ...ImportOption) error
func (*API) ImportRoaring ¶
func (api *API) ImportRoaring(ctx context.Context, indexName, fieldName string, shard uint64, remote bool, req *ImportRoaringRequest) (err0 error)
ImportRoaring is a low level interface for importing data to Pilosa when extremely high throughput is desired. The data must be encoded in a particular way which may be unintuitive (discussed below). The data is merged with existing data.
It takes as input a roaring bitmap which it uses as the data for the indicated index, field, and shard. The bitmap may be encoded according to the official roaring spec (https://github.com/RoaringBitmap/RoaringFormatSpec), or to the pilosa roaring spec which supports 64 bit integers (https://www.pilosa.com/docs/latest/architecture/#roaring-bitmap-storage-format).
The data should be encoded the same way that Pilosa stores fragments internally. A bit "i" being set in the input bitmap indicates that the bit is set in Pilosa row "i/ShardWidth", and in column (shard*ShardWidth)+(i%ShardWidth). That is to say that "data" represents all of the rows in this shard of this field concatenated together in one long bitmap.
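To make that arithmetic concrete, here is a minimal sketch (the helper name is hypothetical; ShardWidth is the package constant referenced above):

// bitPositionForImport returns the bit position "i" to set in the roaring bitmap
// handed to ImportRoaring so that Pilosa sees row rowID, column col.
// col must belong to the target shard, i.e. shard == col/ShardWidth.
func bitPositionForImport(rowID, col uint64) uint64 {
	return rowID*ShardWidth + (col % ShardWidth)
}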
func (*API) ImportValue ¶
func (api *API) ImportValue(ctx context.Context, qcx *Qcx, req *ImportValueRequest, opts ...ImportOption) error
ImportValue allows a nil qcx for the same reason as Import; when qcx is nil, the value import is committed before returning.
func (*API) ImportValueWithTx ¶ added in v2.2.0
func (api *API) ImportValueWithTx(ctx context.Context, qcx *Qcx, req *ImportValueRequest, opts ...ImportOption) (err0 error)
ImportValueWithTx bulk imports values into a particular field.
func (*API) ImportWithTx ¶ added in v2.2.0
func (api *API) ImportWithTx(ctx context.Context, qcx *Qcx, req *ImportRequest, opts ...ImportOption) error
ImportWithTx bulk imports data into a particular index, field, and shard.
func (*API) IndexAttrDiff ¶
func (api *API) IndexAttrDiff(ctx context.Context, indexName string, blocks []AttrBlock) (map[uint64]map[string]interface{}, error)
IndexAttrDiff determines the local column attribute data blocks which differ from those provided.
func (*API) Info ¶
func (api *API) Info() serverInfo
Info returns information about this server instance.
func (*API) Inspect ¶
func (api *API) Inspect(ctx context.Context, req *InspectRequest) (*HolderInfo, error)
func (*API) LatticeVersion ¶ added in v2.2.0
LatticeVersion returns the Lattice version.
func (*API) LongQueryTime ¶
LongQueryTime returns the configured threshold for logging/statting long running queries.
func (*API) MaxShards ¶
MaxShards returns the maximum shard number for each index in a map. TODO (2.0): This method has been deprecated. Instead, use AvailableShardsByIndex.
func (*API) PastQueries ¶ added in v2.4.0
func (*API) PrimaryReplicaNodeURL ¶
PrimaryReplicaNodeURL returns the URL of the cluster's primary replica.
func (*API) Query ¶
func (api *API) Query(ctx context.Context, req *QueryRequest) (QueryResponse, error)
Query parses a PQL query out of the request and executes it.
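A hedged sketch of issuing a PQL query through the API (the index name and PQL string are hypothetical; QueryRequest's Index and Query fields are assumed here, and the struct has further optional fields not shown):

func countRow(ctx context.Context, api *API) (QueryResponse, error) {
	req := &QueryRequest{
		Index: "myindex",
		Query: `Count(Row(myfield=10))`,
	}
	return api.Query(ctx, req)
}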
func (*API) RecalculateCaches ¶
RecalculateCaches forces all TopN caches to be updated. This is done internally within a TopN query, but a user may want to do it ahead of time.
func (*API) RemoveNode ¶
RemoveNode puts the cluster into the "RESIZING" state and begins the job of removing the given node.
func (*API) ReserveIDs ¶ added in v2.5.0
func (*API) ResetIDAlloc ¶ added in v2.5.0
func (*API) ResizeAbort ¶
ResizeAbort stops the current resize job.
func (*API) Schema ¶
Schema returns information about each index in Pilosa including which fields they contain.
func (*API) SchemaDetails ¶ added in v2.9.0
SchemaDetails returns information about each index in Pilosa including which fields they contain, and additional field information such as cardinality.
func (*API) SetCoordinator ¶
SetCoordinator makes a new Node the cluster coordinator.
func (*API) ShardDistribution ¶ added in v2.3.0
ShardDistribution returns an object representing the distribution of shards across nodes for each index, distinguishing between primary and replica. The structure of this information is [indexName][nodeID][primaryOrReplica][]uint64. This function supports a view in the UI.
func (*API) ShardNodes ¶
ShardNodes returns the node and all replicas which should contain a shard's data.
func (*API) StartTransaction ¶
func (*API) State ¶
State returns the cluster state which is usually "NORMAL", but could be "STARTING", "RESIZING", or potentially others. See cluster.go for more details.
func (*API) StatsWithTags ¶
func (api *API) StatsWithTags(tags []string) stats.StatsClient
StatsWithTags returns an instance of whatever implementation of StatsClient pilosa is using with the given tags.
func (*API) Transactions ¶
func (*API) TranslateData ¶
func (api *API) TranslateData(ctx context.Context, indexName string, partition int) (io.WriterTo, error)
TranslateData returns all translation data in the specified partition.
func (*API) TranslateFieldDB ¶ added in v2.2.0
func (api *API) TranslateFieldDB(ctx context.Context, indexName, fieldName string, rd io.Reader) error
TranslateFieldDB is an internal function to load the field keys database.
func (*API) TranslateIDs ¶
TranslateIDs handles a TranslateIDRequest.
func (*API) TranslateIndexDB ¶ added in v2.2.0
func (api *API) TranslateIndexDB(ctx context.Context, indexName string, partitionID int, rd io.Reader) error
TranslateIndexDB is an internal function to load the index keys database; rd is a boltdb file.
func (*API) TranslateIndexIDs ¶
func (*API) TranslateIndexKey ¶
func (*API) TranslateKeys ¶
TranslateKeys handles a TranslateKeyRequest. An ErrTranslatingKeyNotFound error will be swallowed here, so an empty response will be returned.
type ActiveQueryStatus ¶
type AllTranslatorSummary ¶ added in v2.2.0
type AllTranslatorSummary struct { Sums []*TranslatorSummary RepairNeeded bool }
func NewAllTranslatorSummary ¶ added in v2.2.0
func NewAllTranslatorSummary() *AllTranslatorSummary
func (*AllTranslatorSummary) Append ¶ added in v2.2.0
func (ats *AllTranslatorSummary) Append(b *AllTranslatorSummary)
func (*AllTranslatorSummary) Checksum ¶ added in v2.3.0
func (ats *AllTranslatorSummary) Checksum() string
func (*AllTranslatorSummary) Sort ¶ added in v2.2.0
func (ats *AllTranslatorSummary) Sort()
type AtomicRecord ¶ added in v2.2.0
type AtomicRecord struct {
	Index string
	Shard uint64

	Ivr []*ImportValueRequest // BSI values
	Ir  []*ImportRequest      // other field types, e.g. single bit
}
AtomicRecord applies all of its Ivr and Ir requests atomically, in a Tx. The top level Shard must agree with Ivr[i].Shard and Ir[i].Shard for all i included (in Ivr and Ir). The same goes for the top level Index: all records must be writes to the same Index. These requirements are checked.
type AttrStore ¶
type AttrStore interface { Path() string Open() error Close() error Attrs(id uint64) (m map[string]interface{}, err error) SetAttrs(id uint64, m map[string]interface{}) error SetBulkAttrs(m map[uint64]map[string]interface{}) error Blocks() ([]AttrBlock, error) BlockData(i uint64) (map[uint64]map[string]interface{}, error) }
AttrStore represents an interface for handling row/column attributes.
type BadRequestError ¶
type BadRequestError struct {
// contains filtered or unexported fields
}
BadRequestError wraps an error value to signify that a request could not be read, decoded, or parsed such that in an HTTP scenario, http.StatusBadRequest would be returned.
func NewBadRequestError ¶
func NewBadRequestError(err error) BadRequestError
NewBadRequestError returns err wrapped in a BadRequestError.
type Barrier ¶ added in v2.6.0
type Barrier struct {
// contains filtered or unexported fields
}
Barrier allows us to temporarily halt all readers, so that a writer can commit alone and thus compact the db. The Barrier starts unblocked, allowing passage to any caller of WaitAtGate().
func NewBarrier ¶ added in v2.6.0
func NewBarrier() (b *Barrier)
NewBarrier returns a Barrier that is either open, allowing immediate passage, or blocked, halting all callers at WaitAtGate() until the barrier is opened. By default it is open.
Barrier.Close() must be called when the barrier is no longer needed to avoid a goroutine leak.
func (*Barrier) BlockAllReadersNoWait ¶ added in v2.6.0
func (b *Barrier) BlockAllReadersNoWait()
BlockAllReadersNoWait raises the barrier to an infinite number of waiters and returns immediately to the caller.
func (*Barrier) BlockUntil ¶ added in v2.6.0
BlockUntil is called with a count, the number of waiters required to be present and waiting at the gate before the call returns. A count of < 0 will return immediately and raise the barrier to any number of arriving readers. A count of 0 is a no-op.
Otherwise we raise the barrier and wait until we have seen count other goroutines waiting on it.
We return without releasing the waiters. Call Open when you want them to resume.
func (*Barrier) Close ¶ added in v2.6.0
func (b *Barrier) Close()
Close should be called to stop the barrier's background goroutine when you are done using the barrier.
func (*Barrier) UnblockReaders ¶ added in v2.6.0
func (b *Barrier) UnblockReaders()
UnblockReaders lets all waiting goroutines resume execution.
func (*Barrier) WaitAtGate ¶ added in v2.6.0
WaitAtGate will return immediately if the barrier is unblocked. Otherwise it will not return until another goroutine unblocks the barrier.
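Putting the methods above together, a rough writer-side sketch (compactDB is a hypothetical stand-in for the exclusive work):

b := NewBarrier()
defer b.Close() // stop the barrier's background goroutine

// Raise the barrier so readers arriving at WaitAtGate() will wait,
// do the exclusive work, then let the readers resume.
b.BlockAllReadersNoWait()
compactDB()
b.UnblockReaders()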
type Bit ¶
Bit represents the intersection of a row and a column. It can be specified by integer ids or string keys.
type BitmapLikeFilter ¶ added in v2.7.0
type BitmapLikeFilter struct {
	roaring.BitmapRowFilterBase
	// contains filtered or unexported fields
}
BitmapLikeFilter is a roaring.BitmapFilter which handles Like expressions.
func NewBitmapLikeFilter ¶ added in v2.7.0
func NewBitmapLikeFilter(like string, translator TranslateStore) *BitmapLikeFilter
func (*BitmapLikeFilter) ConsiderData ¶ added in v2.7.0
func (b *BitmapLikeFilter) ConsiderData(key roaring.FilterKey, data *roaring.Container) roaring.FilterResult
func (*BitmapLikeFilter) ConsiderKey ¶ added in v2.7.0
func (b *BitmapLikeFilter) ConsiderKey(key roaring.FilterKey, n int32) roaring.FilterResult
type BlockDataRequest ¶
BlockDataRequest describes the structure of a request for fragment block data.
type BlockDataResponse ¶
BlockDataResponse is the structured response of a block data request.
type BoltIterator ¶ added in v2.3.0
type BoltIterator struct {
// contains filtered or unexported fields
}
BoltIterator is the iterator returned from a BoltTx.ContainerIterator() call. It implements the roaring.ContainerIterator interface.
func NewBoltIterator ¶ added in v2.3.0
func NewBoltIterator(tx *BoltTx, prefix []byte) (bi *BoltIterator)
NewBoltIterator creates an iterator on tx that will only return boltKeys that start with prefix.
func (*BoltIterator) Close ¶ added in v2.3.0
func (bi *BoltIterator) Close()
Close tells the database and transaction that the user is done with the iterator.
func (*BoltIterator) Next ¶ added in v2.3.0
func (bi *BoltIterator) Next() (ok bool)
Next advances the iterator.
func (*BoltIterator) Seek ¶ added in v2.3.0
func (bi *BoltIterator) Seek(needle []byte) (ok bool)
Seek allows the iterator to start at needle instead of the global beginning.
func (*BoltIterator) String ¶ added in v2.3.0
func (bi *BoltIterator) String() (r string)
func (*BoltIterator) Valid ¶ added in v2.3.0
func (bi *BoltIterator) Valid() bool
Valid returns false if there are no more values in the iterator's range.
func (*BoltIterator) ValidForPrefix ¶ added in v2.3.0
func (bi *BoltIterator) ValidForPrefix(prefix []byte) bool
type BoltTx ¶ added in v2.3.0
type BoltTx struct {
	Db *BoltWrapper

	DeleteEmptyContainer bool
	// contains filtered or unexported fields
}
BoltTx wraps a bolt.Tx and provides the Tx interface method implementations. The methods on BoltTx are thread-safe, and can be called from different goroutines.
func (*BoltTx) Add ¶ added in v2.3.0
func (tx *BoltTx) Add(index, field, view string, shard uint64, batched bool, a ...uint64) (changeCount int, err error)
Add sets all of the bits in a hot in the specified fragment.
func (*BoltTx) ApplyFilter ¶ added in v2.7.0
func (*BoltTx) Commit ¶ added in v2.3.0
Commit commits the transaction to permanent storage. Commits can handle up to 100k updates to fragments at once, but not more. This is a BoltDB imposed limit.
func (*BoltTx) Container ¶ added in v2.3.0
func (tx *BoltTx) Container(index, field, view string, shard uint64, ckey uint64) (c *roaring.Container, err error)
Container returns the requested roaring.Container, selected by fragment and ckey.
func (*BoltTx) ContainerIterator ¶ added in v2.3.0
func (tx *BoltTx) ContainerIterator(index, field, view string, shard uint64, firstRoaringContainerKey uint64) (citer roaring.ContainerIterator, found bool, err error)
firstRoaringContainerKey is the container key for the first roaring Container. From the roaring docs: Iterator returns a ContainerIterator such that, after a call to Next(), a call to Value() will return the first container at or after key. found will be true if a container is found at key.
BoltTx notes: We auto-stop at the end of this shard, not going beyond.
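A rough iteration sketch, assuming the Next/Value contract described above (the index, field, view, and starting key are hypothetical):

citer, found, err := tx.ContainerIterator("myindex", "myfield", "standard", 0, 0)
if err != nil {
	return err
}
defer citer.Close()
_ = found // true if a container exists exactly at the starting key
for citer.Next() {
	ckey, _ := citer.Value() // container key and *roaring.Container
	fmt.Println("container key:", ckey)
}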
func (*BoltTx) Contains ¶ added in v2.3.0
func (tx *BoltTx) Contains(index, field, view string, shard uint64, key uint64) (exists bool, err error)
Contains returns exists true iff the bit chosen by key is hot (set to 1) in the specified fragment.
func (*BoltTx) Count ¶ added in v2.3.0
Count operates on the full bitmap level, so it sums over all the containers in the bitmap.
func (*BoltTx) CountRange ¶ added in v2.3.0
func (tx *BoltTx) CountRange(index, field, view string, shard uint64, start, end uint64) (n uint64, err error)
CountRange returns the count of hot bits in the start, end range on the fragment. roaring.countRange counts the number of bits set between [start, end).
func (*BoltTx) ForEachRange ¶ added in v2.3.0
func (tx *BoltTx) ForEachRange(index, field, view string, shard uint64, start, end uint64, fn func(uint64) error) error
ForEachRange applies fn on the selected range of bits on the chosen fragment.
func (*BoltTx) GetFieldSizeBytes ¶ added in v2.8.0
func (*BoltTx) GetSortedFieldViewList ¶ added in v2.7.0
func (*BoltTx) ImportRoaringBits ¶ added in v2.3.0
func (tx *BoltTx) ImportRoaringBits(index, field, view string, shard uint64, itr roaring.RoaringIterator, clear bool, log bool, rowSize uint64, data []byte) (changed int, rowSet map[uint64]int, err error)
ImportRoaringBits handles deletes by setting clear=true. rowSet[rowID] reports the number of bits changed on that rowID.
func (*BoltTx) IncrementOpN ¶ added in v2.3.0
IncrementOpN increments the tx opcount by changedN.
func (*BoltTx) Max ¶ added in v2.3.0
Max is the maximum bit-value in your bitmap. Returns zero if the bitmap is empty. Odd, but this is what roaring.Max does.
func (*BoltTx) Min ¶ added in v2.3.0
Min returns the smallest bit set in the fragment. If no bit is hot, the second return argument is false.
func (*BoltTx) NewTxIterator ¶ added in v2.3.0
NewTxIterator returns a *roaring.Iterator that MUST have Close() called on it BEFORE the transaction Commits or Rollsback.
func (*BoltTx) OffsetRange ¶ added in v2.3.0
func (tx *BoltTx) OffsetRange(index, field, view string, shard, offset, start, endx uint64) (other *roaring.Bitmap, err error)
OffsetRange creates a new roaring.Bitmap to return in other. For all the hot bits in [start, endx) of the chosen fragment, it stores them into other but with offset added to their bit position. The primary client is doing this, using ShardWidth, already; see fragment.rowFromStorage() in fragment.go. For example:
data, err := tx.OffsetRange(f.index, f.field, f.view, f.shard,
	f.shard*ShardWidth,   // offset
	rowID*ShardWidth,     // start
	(rowID+1)*ShardWidth) // endx
The start and endx arguments are container keys that have been shifted left by 16 bits; their highbits() will be taken to determine the actual container keys. This is done to conform to the roaring.OffsetRange() argument convention.
func (*BoltTx) Pointer ¶ added in v2.3.0
Pointer gives us a memory address for the underlying transaction for debugging. It is public because we use it in roaring to report invalid container memory access outside of a transaction.
func (*BoltTx) PutContainer ¶ added in v2.3.0
func (tx *BoltTx) PutContainer(index, field, view string, shard uint64, ckey uint64, rc *roaring.Container) error
PutContainer stores rc under the specified fragment and container ckey.
func (*BoltTx) Remove ¶ added in v2.3.0
func (tx *BoltTx) Remove(index, field, view string, shard uint64, a ...uint64) (changeCount int, err error)
Remove clears all the specified a bits in the chosen fragment.
func (*BoltTx) RemoveContainer ¶ added in v2.3.0
RemoveContainer deletes the container specified by the shard and container key ckey.
func (*BoltTx) RoaringBitmap ¶ added in v2.3.0
RoaringBitmap returns the roaring.Bitmap for all bits in the fragment.
func (*BoltTx) RoaringBitmapReader ¶ added in v2.3.0
func (*BoltTx) Rollback ¶ added in v2.3.0
func (tx *BoltTx) Rollback()
Rollback rolls back the transaction.
func (*BoltTx) UnionInPlace ¶ added in v2.3.0
func (tx *BoltTx) UnionInPlace(index, field, view string, shard uint64, others ...*roaring.Bitmap) error
UnionInPlace unions all of the given others Bitmaps into a new Bitmap, and then writes it to the specified fragment.
func (*BoltTx) UseRowCache ¶ added in v2.3.0
type BoltWrapper ¶ added in v2.3.0
type BoltWrapper struct {
	DeleteEmptyContainer bool
	// contains filtered or unexported fields
}
BoltWrapper provides the NewTx() method.
func (*BoltWrapper) CleanupTx ¶ added in v2.3.0
func (w *BoltWrapper) CleanupTx(tx Tx)
func (*BoltWrapper) Close ¶ added in v2.3.0
func (w *BoltWrapper) Close() (err error)
Close shuts down the Bolt database.
func (*BoltWrapper) DeleteDBPath ¶ added in v2.3.0
func (w *BoltWrapper) DeleteDBPath(dbs *DBShard) (err error)
func (*BoltWrapper) DeleteField ¶ added in v2.3.0
func (w *BoltWrapper) DeleteField(index, field, fieldPath string) (err error)
func (*BoltWrapper) DeleteFragment ¶ added in v2.3.0
func (w *BoltWrapper) DeleteFragment(index, field, view string, shard uint64, frag interface{}) error
func (*BoltWrapper) DeleteIndex ¶ added in v2.3.0
func (w *BoltWrapper) DeleteIndex(indexName string) error
DeleteIndex deletes all the containers associated with the named index from the bolt database.
func (*BoltWrapper) DeletePrefix ¶ added in v2.3.0
func (w *BoltWrapper) DeletePrefix(prefix []byte) error
func (*BoltWrapper) HasData ¶ added in v2.3.0
func (w *BoltWrapper) HasData() (has bool, err error)
func (*BoltWrapper) IsClosed ¶ added in v2.3.0
func (w *BoltWrapper) IsClosed() (closed bool)
func (*BoltWrapper) NewTx ¶ added in v2.3.0
NewTx produces Bolt based transactions. If the transaction will modify data, then the write flag must be true. Read-only queries should set write to false, to allow more concurrency. Methods on a BoltTx are thread-safe, and can be called from different goroutines.
initialIndexName is optional. It is set by the TxFactory from the Txo options provided at the Tx creation point. It allows us to recognize and isolate cross-index queries more quickly. It can always be empty "" but when set is highly useful for debugging. It has no impact on transaction behavior.
func (*BoltWrapper) NewTxREAD ¶ added in v2.3.0
func (w *BoltWrapper) NewTxREAD() (*bolt.Tx, error)
NewTxREAD lets us see in the callstack dumps where the READ tx are.
func (*BoltWrapper) NewTxWRITE ¶ added in v2.3.0
func (w *BoltWrapper) NewTxWRITE() (*bolt.Tx, error)
NewTxWRITE lets us see in the callstack dumps where the WRITE tx are. Can't have more than one active write per database, so the 2nd one will block until the first finishes.
func (*BoltWrapper) OpenListString ¶ added in v2.3.0
func (w *BoltWrapper) OpenListString() (r string)
func (*BoltWrapper) OpenSnList ¶ added in v2.3.0
func (w *BoltWrapper) OpenSnList() (slc []int64)
func (*BoltWrapper) Path ¶ added in v2.3.0
func (w *BoltWrapper) Path() string
func (*BoltWrapper) SetHolder ¶ added in v2.3.0
func (w *BoltWrapper) SetHolder(h *Holder)
func (*BoltWrapper) StringifiedBoltKeys ¶ added in v2.3.0
func (w *BoltWrapper) StringifiedBoltKeys(optionalUseThisTx Tx, short bool) (r string)
StringifiedBoltKeys returns a string with all the container keys available in bolt.
type ClusterStatus ¶
ClusterStatus describes the status of the cluster including its state and node topology.
type CmdIO ¶
type CmdIO struct {
	Stdin  io.Reader
	Stdout io.Writer
	Stderr io.Writer
	// contains filtered or unexported fields
}
CmdIO holds standard unix inputs and outputs.
type ColumnAttrSet ¶
type ColumnAttrSet struct { ID uint64 `json:"id"` Key string `json:"key,omitempty"` Attrs map[string]interface{} `json:"attrs,omitempty"` }
ColumnAttrSet represents a set of attributes for a vertical column in an index. Can have a set of attributes attached to it.
func (ColumnAttrSet) MarshalJSON ¶
func (cas ColumnAttrSet) MarshalJSON() ([]byte, error)
MarshalJSON marshals the ColumnAttrSet to JSON such that either a Key or an ID is included.
type ConflictError ¶
type ConflictError struct {
// contains filtered or unexported fields
}
ConflictError wraps an error value to signify that a conflict with an existing resource occurred such that in an HTTP scenario, http.StatusConflict would be returned.
type CreateFieldMessage ¶
type CreateFieldMessage struct { Index string Field string CreatedAt int64 Meta *FieldOptions }
CreateFieldMessage is an internal message indicating field creation.
type CreateIndexMessage ¶
type CreateIndexMessage struct { Index string CreatedAt int64 Meta *IndexOptions }
CreateIndexMessage is an internal message indicating index creation.
type CreateShardMessage ¶
CreateShardMessage is an internal message indicating shard creation.
type CreateViewMessage ¶
CreateViewMessage is an internal message indicating view creation.
type DBHolder ¶ added in v2.2.0
func NewDBHolder ¶ added in v2.2.0
func NewDBHolder() *DBHolder
type DBPerShard ¶ added in v2.2.0
type DBPerShard struct {
	Mu        sync.Mutex
	HolderDir string // just flat, not buried within the Node hierarchy.

	// Easily see how many we have.
	Flatmap map[flatkey]*DBShard

	RBFConfig *rbfcfg.Config
	// contains filtered or unexported fields
}
func (*DBPerShard) Close ¶ added in v2.2.0
func (per *DBPerShard) Close() (err error)
func (*DBPerShard) Del ¶ added in v2.2.0
func (per *DBPerShard) Del(dbs *DBShard) (err error)
func (*DBPerShard) DeleteFieldFromStore ¶ added in v2.2.0
func (per *DBPerShard) DeleteFieldFromStore(index, field, fieldPath string) (err error)
func (*DBPerShard) DeleteFragment ¶ added in v2.2.0
func (per *DBPerShard) DeleteFragment(index, field, view string, shard uint64, frag *fragment) error
func (*DBPerShard) DeleteIndex ¶ added in v2.2.0
func (per *DBPerShard) DeleteIndex(index string) (err error)
func (*DBPerShard) DumpAll ¶ added in v2.2.0
func (per *DBPerShard) DumpAll()
func (*DBPerShard) GetDBShard ¶ added in v2.2.0
func (*DBPerShard) GetFieldView2ShardsMapForIndex ¶ added in v2.7.0
func (per *DBPerShard) GetFieldView2ShardsMapForIndex(idx *Index) (vs *FieldView2Shards, err error)
Note: this cannot be called during migration, because it only ever returns the green shards when we are in blue-green mode.
func (*DBPerShard) HasData ¶ added in v2.2.0
func (per *DBPerShard) HasData(which int) (hasData bool, err error)
HasData returns true if the database has at least one key. For roaring it returns true if we have a fragment stored. The `which` argument is the index into the per.W slice: 0 for blue, 1 for green. If you pass 1, be sure you have a blue-green configuration.
func (*DBPerShard) ListOpenString ¶ added in v2.2.0
func (per *DBPerShard) ListOpenString() (r string)
func (*DBPerShard) LoadExistingDBs ¶ added in v2.2.0
func (per *DBPerShard) LoadExistingDBs() (err error)
func (*DBPerShard) RoaringHasData ¶ added in v2.3.0
func (per *DBPerShard) RoaringHasData() (bool, error)
func (*DBPerShard) TypedDBPerShardGetShardsForIndex ¶ added in v2.2.0
func (per *DBPerShard) TypedDBPerShardGetShardsForIndex(ty txtype, idx *Index, roaringViewPath string, requireData bool) (shardMap map[uint64]bool, err error)
If roaringViewPath is "" then for ty == roaringTxn we go to disk to discover all the view paths under idx for type ty. requireData means open the database file and verify that at least one key is set. The returned shardMap should not be modified; we cache it for subsequent queries.
When a new DBShard is made, we update the list of shards then. Thus per.index2shard should always be up to date AFTER the first call here.
Note: we cannot here call GetView2ShardsMapForIndex() because that only ever returns the green data and we are used during migration for both blue and green.
type DBRegistry ¶ added in v2.2.0
type DBShard ¶ added in v2.2.0
type DBShard struct {
	HolderPath string

	Index string
	Shard uint64
	Open  bool
	W     []DBWrapper

	ParentDBIndex *DBIndex
	// contains filtered or unexported fields
}
func (*DBShard) AllFieldViews ¶ added in v2.7.0
func (*DBShard) Cleanup ¶ added in v2.2.0
Cleanup must be called at every commit/rollback of a Tx, in order to release the read-write mutex that guarantees a single writer at a time. Each tx must take care to call cleanup() exactly once. examples:
tx.o.dbs.Cleanup(tx)
tx.Options().dbs.Cleanup(tx)
func (*DBShard) DeleteDBPath ¶ added in v2.2.0
func (*DBShard) DeleteFieldFromStore ¶ added in v2.2.0
func (*DBShard) DeleteFragment ¶ added in v2.2.0
func (*DBShard) HolderString ¶ added in v2.2.0
type DBWrapper ¶ added in v2.2.0
type DBWrapper interface { NewTx(write bool, initialIndexName string, o Txo) (tx Tx, err error) DeleteDBPath(dbs *DBShard) error Close() error DeleteFragment(index, field, view string, shard uint64, frag interface{}) error DeleteField(index, field, fieldPath string) error OpenListString() string OpenSnList() (sns []int64) Path() string HasData() (has bool, err error) SetHolder(h *Holder) }
type DeleteAvailableShardMessage ¶
DeleteAvailableShardMessage is an internal message indicating available shard deletion.
type DeleteFieldMessage ¶
DeleteFieldMessage is an internal message indicating field deletion.
type DeleteIndexMessage ¶
type DeleteIndexMessage struct {
Index string
}
DeleteIndexMessage is an internal message indicating index deletion.
type DeleteViewMessage ¶
DeleteViewMessage is an internal message indicating view deletion.
type DiskUsage ¶ added in v2.3.0
type DiskUsage struct { Capacity uint64 `json:"capacity,omitempty"` TotalUse uint64 `json:"totalInUse"` IndexUsage map[string]IndexUsage `json:"indexes"` }
DiskUsage represents the storage space used on disk by one node.
type Dumper ¶ added in v2.2.0
type Dumper interface {
// Dump is for debugging, what does this Tx see as its database?
AllDump()
}
type ExtractedIDColumn ¶ added in v2.2.0
type ExtractedIDMatrix ¶ added in v2.2.0
type ExtractedIDMatrix struct { Fields []string Columns []ExtractedIDColumn }
func (*ExtractedIDMatrix) Append ¶ added in v2.2.0
func (e *ExtractedIDMatrix) Append(m ExtractedIDMatrix)
type ExtractedTable ¶ added in v2.2.0
type ExtractedTable struct { Fields []ExtractedTableField `json:"fields"` Columns []ExtractedTableColumn `json:"columns"` }
func (ExtractedTable) ToRows ¶ added in v2.2.0
func (t ExtractedTable) ToRows(callback func(*pb.RowResponse) error) error
ToRows implements the ToRowser interface.
func (ExtractedTable) ToTable ¶ added in v2.2.0
func (t ExtractedTable) ToTable() (*pb.TableResponse, error)
ToTable converts the table to protobuf format.
type ExtractedTableColumn ¶ added in v2.2.0
type ExtractedTableColumn struct { Column KeyOrID `json:"column"` Rows []interface{} `json:"rows"` }
type ExtractedTableField ¶ added in v2.2.0
type Field ¶
type Field struct {
	Stats stats.StatsClient

	// Instantiates new translation stores
	OpenTranslateStore OpenTranslateStoreFunc
	// contains filtered or unexported fields
}
Field represents a container for views.
func NewField ¶
func NewField(holder *Holder, path, index, name string, opts FieldOption) (*Field, error)
NewField returns a new instance of field. NOTE: This function is only used in tests, which is why it only takes a single `FieldOption` (the assumption being that it's of the type `OptFieldType*`). This means this function couldn't be used to set, for example, `FieldOptions.Keys`.
func (*Field) AddRemoteAvailableShards ¶
AddRemoteAvailableShards merges the set of available shards into the current known set and saves the set to a file.
func (*Field) AvailableShards ¶
AvailableShards returns a bitmap of shards that contain data.
func (*Field) ClearValue ¶
ClearValue removes a field value for a column.
func (*Field) ForeignIndex ¶
ForeignIndex returns the foreign index name attached to the field. Returns blank string if no foreign index exists.
func (*Field) Import ¶
func (f *Field) Import(qcx *Qcx, rowIDs, columnIDs []uint64, timestamps []*time.Time, opts ...ImportOption) (err0 error)
Import bulk imports data.
func (*Field) LocalAvailableShards ¶ added in v2.2.0
LocalAvailableShards returns a bitmap of shards that contain data, but only from the local node. This prevents txfactory from making db-per-shard for remote shards.
func (*Field) MaxForShard ¶
func (*Field) MinForShard ¶
MinForShard returns the minimum value which appears in this shard (this field must be an Int or Decimal field). It also returns the number of times the minimum value appears.
func (*Field) Options ¶
func (f *Field) Options() FieldOptions
Options returns all options for this field.
func (*Field) RemoveAvailableShard ¶
RemoveAvailableShard removes a shard from the bitmap cache.
NOTE: This can be overridden on the next sync so all nodes should be updated.
func (*Field) Row ¶
Row returns a row of the standard view. It seems this method is only being used by the test package, and the fact that it's only allowed on `set`,`mutex`, and `bool` fields is odd. This may be considered for deprecation in a future version.
func (*Field) RowAttrStore ¶
RowAttrStore returns the attribute storage.
func (*Field) RowTime ¶
RowTime gets the row at the particular time with the granularity specified by the quantum.
func (*Field) SetCacheSize ¶
SetCacheSize sets the cache size for ranked fields. Persists to the meta file on update. Defaults to DefaultCacheSize (50000).
func (*Field) StringValue ¶
StringValue reads an integer field value for a column, and converts it to a string based on a foreign index string key.
func (*Field) TimeQuantum ¶
func (f *Field) TimeQuantum() TimeQuantum
TimeQuantum returns the time quantum for the field.
func (*Field) TranslateStore ¶
func (f *Field) TranslateStore() TranslateStore
TranslateStore returns the field's translation store.
func (*Field) TranslateStorePath ¶
TranslateStorePath returns the translation database path for the field.
type FieldInfo ¶
type FieldInfo struct { Name string `json:"name"` CreatedAt int64 `json:"createdAt,omitempty"` Options FieldOptions `json:"options"` Cardinality *uint64 `json:"cardinality,omitempty"` Views []*ViewInfo `json:"views,omitempty"` }
FieldInfo represents schema information for a field.
type FieldOption ¶
type FieldOption func(fo *FieldOptions) error
FieldOption is a functional option type for pilosa.fieldOptions.
func OptFieldForeignIndex ¶
func OptFieldForeignIndex(index string) FieldOption
OptFieldForeignIndex marks this field as a foreign key to another index. That is, the values of this field should be interpreted as referencing records (Pilosa columns) in another index. TODO explain where/how this is used by Pilosa.
func OptFieldKeys ¶
func OptFieldKeys() FieldOption
OptFieldKeys is a functional option on FieldOptions used to specify whether keys are used for this field.
func OptFieldTypeBool ¶
func OptFieldTypeBool() FieldOption
OptFieldTypeBool is a functional option on FieldOptions used to specify the field as being type `bool` and to provide any respective configuration values.
func OptFieldTypeDecimal ¶
func OptFieldTypeDecimal(scale int64, minmax ...pql.Decimal) FieldOption
OptFieldTypeDecimal is a functional option for creating a `decimal` field. Unless we decide to expand the range of supported values, `scale` is restricted to the range [0,19]. This supports anything from:
scale = 0: min: -9223372036854775808. max: 9223372036854775807.
to:
scale = 19: min: -0.9223372036854775808 max: 0.9223372036854775807
While it's possible to support scale values outside of this range, the coverage for those scales is no longer continuous. For example,

scale = -2:
	min : [-922337203685477580800, -100]
	      GAPs: [-99, -1], [-199, -101] ... [-922337203685477580799, -922337203685477580701]
	0
	max : [100, 922337203685477580700]
	      GAPs: [1, 99], [101, 199] ... [922337203685477580601, 922337203685477580699]
An alternative to this gap strategy would be to scale the supported range to a continuous 64-bit space (which is not unreasonable using bsiGroup.Base). The issue with this approach is that we would need to know which direction to favor. For example, there are two possible ranges for `scale = -2`:
min : [-922337203685477580800, -922337203685477580800+(2^64)]
max : [922337203685477580700-(2^64), 922337203685477580700]
func OptFieldTypeDefault ¶
func OptFieldTypeDefault() FieldOption
OptFieldTypeDefault is a functional option on FieldOptions used to set the field type and cache setting to the default values.
func OptFieldTypeInt ¶
func OptFieldTypeInt(min, max int64) FieldOption
OptFieldTypeInt is a functional option on FieldOptions used to specify the field as being type `int` and to provide any respective configuration values.
func OptFieldTypeMutex ¶
func OptFieldTypeMutex(cacheType string, cacheSize uint32) FieldOption
OptFieldTypeMutex is a functional option on FieldOptions used to specify the field as being type `mutex` and to provide any respective configuration values.
func OptFieldTypeSet ¶
func OptFieldTypeSet(cacheType string, cacheSize uint32) FieldOption
OptFieldTypeSet is a functional option on FieldOptions used to specify the field as being type `set` and to provide any respective configuration values.
func OptFieldTypeTime ¶
func OptFieldTypeTime(timeQuantum TimeQuantum, opt ...bool) FieldOption
OptFieldTypeTime is a functional option on FieldOptions used to specify the field as being type `time` and to provide any respective configuration values. Pass true to skip creation of the standard view.
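A hedged example of using these functional options with API.CreateField (the index and field names, cache type, and cache size are hypothetical; per the CreateField note above, a single option is passed per call):

// A ranked "set" field.
if _, err := api.CreateField(ctx, "myindex", "events", OptFieldTypeSet("ranked", 50000)); err != nil {
	return err
}
// An integer field accepting values in [-1000, 1000].
if _, err := api.CreateField(ctx, "myindex", "score", OptFieldTypeInt(-1000, 1000)); err != nil {
	return err
}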
type FieldOptions ¶
type FieldOptions struct {
	Base           int64       `json:"base,omitempty"`
	BitDepth       uint        `json:"bitDepth,omitempty"`
	Min            pql.Decimal `json:"min,omitempty"`
	Max            pql.Decimal `json:"max,omitempty"`
	Scale          int64       `json:"scale,omitempty"`
	Keys           bool        `json:"keys"`
	NoStandardView bool        `json:"noStandardView,omitempty"`
	CacheSize      uint32      `json:"cacheSize,omitempty"`
	CacheType      string      `json:"cacheType,omitempty"`
	Type           string      `json:"type,omitempty"`
	TimeQuantum    TimeQuantum `json:"timeQuantum,omitempty"`
	ForeignIndex   string      `json:"foreignIndex"`
}
FieldOptions represents options to set when initializing a field.
func (*FieldOptions) MarshalJSON ¶
func (o *FieldOptions) MarshalJSON() ([]byte, error)
MarshalJSON marshals FieldOptions to JSON such that only those attributes associated to the field type are included.
type FieldRow ¶
type FieldRow struct { Field string `json:"field"` RowID uint64 `json:"rowID"` RowKey string `json:"rowKey,omitempty"` Value *int64 `json:"value,omitempty"` }
FieldRow is used to distinguish rows in a group by result.
func (FieldRow) MarshalJSON ¶
MarshalJSON marshals FieldRow to JSON such that either a Key or an ID is included.
type FieldStatus ¶
FieldStatus is an internal message representing the contents of a field.
type FieldUsage ¶ added in v2.8.0
type FieldUsage struct { Total uint64 `json:"total"` Fragments uint64 `json:"fragments"` Keys uint64 `json:"keys"` Metadata uint64 `json:"metadata"` }
FieldUsage represents the storage space used on disk by one field, on one node.
type FieldValue ¶
FieldValue represents the value for a column within a range-encoded field.
type FieldView2Shards ¶ added in v2.7.0
type FieldView2Shards struct {
// contains filtered or unexported fields
}
func NewFieldView2Shards ¶ added in v2.7.0
func NewFieldView2Shards() *FieldView2Shards
func (*FieldView2Shards) String ¶ added in v2.7.0
func (vs *FieldView2Shards) String() (r string)
type FileSystem ¶ added in v2.2.0
type FileSystem interface {
New() (http.FileSystem, error)
}
FileSystem represents an interface for file system for serving the Lattice UI.
var NopFileSystem FileSystem
NopFileSystem represents a FileSystem that returns an error if called.
type FragSum ¶ added in v2.3.0
type FragSum struct {
	AbsPath string
	RelPath string

	// critically, NodeID is how pilosa-fsck figures out if this
	// fragment should be deleted if it is on a node it should not be.
	NodeID string

	Index    string
	Field    string
	View     string
	Shard    uint64
	Hotbits  int
	Checksum string
	Primary  int

	ScanDone bool // pilosa-fsck will set this once done to avoid repairing multiple times.
}
FragSum is used in IndexFragmentSummary.
type FragmentBlock ¶
FragmentBlock represents info about a subsection of the rows in a block. This is used for comparing data in remote blocks for active anti-entropy.
type FragmentInfo ¶
type FragmentInfo struct { BitmapInfo roaring.BitmapInfo BlockChecksums []FragmentBlock `json:"BlockChecksums,omitempty"` }
type GCNotifier ¶
type GCNotifier interface { Close() AfterGC() <-chan struct{} }
GCNotifier represents an interface for garbage collection notifications.
var NopGCNotifier GCNotifier = &nopGCNotifier{}
NopGCNotifier represents a GCNotifier that doesn't do anything.
type GroupCount ¶
type GroupCount struct { Group []FieldRow `json:"group"` Count uint64 `json:"count"` Agg int64 `json:"-"` }
GroupCount represents a result item for a group by query.
func (*GroupCount) Clone ¶ added in v2.2.0
func (g *GroupCount) Clone() (r *GroupCount)
func (GroupCount) Compare ¶
func (g GroupCount) Compare(o GroupCount) int
Compare is used in ordering two GroupCount objects.
type GroupCounts ¶
type GroupCounts struct {
// contains filtered or unexported fields
}
GroupCounts is a list of GroupCount.
func NewGroupCounts ¶ added in v2.8.0
func NewGroupCounts(agg string, groups ...GroupCount) *GroupCounts
NewGroupCounts creates a GroupCounts with the given type and slice of GroupCount objects. There's intentionally no externally-accessible way to change the []GroupCount after creation.
func (*GroupCounts) AggregateColumn ¶ added in v2.8.0
func (g *GroupCounts) AggregateColumn() string
AggregateColumn gives the likely column name to use for aggregates, because for historical reasons we used "sum" when it was a sum, but don't want to use that when it's something else. This will likely get revisited.
func (*GroupCounts) Groups ¶ added in v2.8.0
func (g *GroupCounts) Groups() []GroupCount
Groups is a convenience method to let us not worry as much about the potentially-nil nature of a *GroupCounts.
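A small sketch of building and reading a GroupCounts (the field, row, and counts are hypothetical; "sum" follows the aggregate naming discussed above):

gc := NewGroupCounts("sum", GroupCount{
	Group: []FieldRow{{Field: "color", RowID: 1}},
	Count: 42,
	Agg:   7,
})
for _, g := range gc.Groups() {
	fmt.Println(g.Group[0].Field, g.Count)
}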
func (*GroupCounts) MarshalJSON ¶ added in v2.8.0
func (g *GroupCounts) MarshalJSON() ([]byte, error)
MarshalJSON makes GroupCounts satisfy interface json.Marshaler and customizes the JSON output of the aggregate field label.
func (*GroupCounts) ToRows ¶
func (g *GroupCounts) ToRows(callback func(*pb.RowResponse) error) error
ToRows implements the ToRowser interface.
func (*GroupCounts) ToTable ¶
func (g *GroupCounts) ToTable() (*pb.TableResponse, error)
ToTable implements the ToTabler interface.
type Handler ¶
Handler is the interface for the data handler, a wrapper around Pilosa's data store.
var NopHandler Handler = nopHandler{}
NopHandler is a no-op implementation of the Handler interface.
type Hasher ¶
type Hasher interface {
	// Hashes the key into a number between [0,N).
	Hash(key uint64, n int) int
	Name() string
}
Hasher represents an interface to hash integers into buckets.
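Because the interface is tiny, a toy implementation shows its shape (this modulo hasher is purely illustrative and is not the hash Pilosa uses):

type modHasher struct{}

// Hash buckets key into [0, n) by simple modulo.
func (modHasher) Hash(key uint64, n int) int { return int(key % uint64(n)) }

func (modHasher) Name() string { return "mod" }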
type Holder ¶
type Holder struct {
	NewAttrStore func(string) AttrStore

	// Stats
	Stats stats.StatsClient

	Logger logger.Logger

	SnapshotQueue SnapshotQueue

	// Instantiates new translation stores
	OpenTranslateStore  OpenTranslateStoreFunc
	OpenTranslateReader OpenTranslateReaderFunc

	// Func to open whatever implementation of transaction store we're using.
	OpenTransactionStore OpenTransactionStoreFunc

	// Func to open the ID allocator.
	OpenIDAllocator func(string) (*idAllocator, error)

	Opts HolderOpts

	Auditor testhook.Auditor
	// contains filtered or unexported fields
}
Holder represents a container for indexes.
func NewHolder ¶
func NewHolder(path string, cfg *HolderConfig) *Holder
NewHolder returns a new instance of Holder for the given path.
func (*Holder) Activate ¶
func (h *Holder) Activate()
Activate runs the background tasks relevant to keeping a holder in a stable state, such as scanning it for needed snapshots, or flushing caches. This is separate from opening because, while a server would nearly always want to do this, other use cases (like consistency checks of a data directory) need to avoid it even getting started.
func (*Holder) BeginTx ¶ added in v2.2.0
BeginTx starts a transaction on the holder. The index and shard must be specified.
func (*Holder) CreateIndex ¶
func (h *Holder) CreateIndex(name string, opt IndexOptions) (*Index, error)
CreateIndex creates an index. An error is returned if the index already exists.
func (*Holder) CreateIndexIfNotExists ¶
func (h *Holder) CreateIndexIfNotExists(name string, opt IndexOptions) (*Index, error)
CreateIndexIfNotExists returns an index by name. The index is created if it does not already exist.
func (*Holder) DeleteIndex ¶
DeleteIndex removes an index from the holder.
func (*Holder) DumpAllShards ¶ added in v2.2.0
func (h *Holder) DumpAllShards()
func (*Holder) FinishTransaction ¶
func (*Holder) GetTransaction ¶
func (*Holder) HasData ¶
HasData returns true if Holder contains at least one index. This is used to determine if the rebalancing of data is necessary when a node joins the cluster.
func (*Holder) HasRoaringData ¶ added in v2.2.0
func (*Holder) HolderPathFromIndexPath ¶ added in v2.2.0
HolderPathFromIndexPath is used by test/index.go:71 in test.Index.Reopen() to get the right path into a test Holder that doesn't know its own proper path. If the Holder changes index paths to being something other than holderPath + "/" + indexName, this will need adjusting too.
func (*Holder) Inspect ¶
func (h *Holder) Inspect(ctx context.Context, req *InspectRequest) (*HolderInfo, error)
func (*Holder) LoadNodeID ¶ added in v2.3.0
func (*Holder) NeedsSnapshot ¶ added in v2.2.0
func (*Holder) Process ¶
func (h *Holder) Process(ctx context.Context, op HolderOperator) (err error)
Process loops through a holder based on the Check functions in op, calling the Process functions in op when indicated.
func (*Holder) Schema ¶
Schema returns schema information for all indexes, fields, and views. If includeHiddenAndViews=true, include fields beginning with "_", as well as view details.
func (*Holder) StartTransaction ¶
func (*Holder) Transactions ¶
type HolderConfig ¶ added in v2.2.0
type HolderConfig struct {
	PartitionN           int
	OpenTranslateStore   OpenTranslateStoreFunc
	OpenTranslateReader  OpenTranslateReaderFunc
	OpenTransactionStore OpenTransactionStoreFunc
	OpenIDAllocator      OpenIDAllocatorFunc
	TranslationSyncer    TranslationSyncer
	CacheFlushInterval   time.Duration
	StatsClient          stats.StatsClient
	NewAttrStore         func(string) AttrStore
	Logger               logger.Logger
	Txsrc                string
	RowcacheOn           bool
	RBFConfig            *rbfcfg.Config
	AntiEntropyInterval  time.Duration
}
HolderConfig holds configuration details that need to be set up at initial holder creation. NewHolder takes a *HolderConfig, which can be nil. Use DefaultHolderConfig to get a default-valued HolderConfig you can then alter.
func DefaultHolderConfig ¶ added in v2.2.0
func DefaultHolderConfig() *HolderConfig
type HolderFilter ¶
type HolderFilter interface { CheckIndex(iname string) (process bool, recurse bool) CheckField(iname, fname string) (process bool, recurse bool) CheckView(iname, fname, vname string) (process bool, recurse bool) CheckFragment(iname, fname, vname string, shard uint64) (process bool) }
HolderFilter represents something that potentially filters out parts of a holder, indicating whether or not to process them, or recurse into them. It is permissible to recurse a thing without processing it, or process it without recursing it. For instance, something looking to accumulate statistics about views might return (true, false) from CheckView, while a fragment scanning operation would return (false, true) from everything above CheckFrag.
func NewHolderFilter ¶
func NewHolderFilter(params HolderFilterParams) (result HolderFilter, err error)
type HolderFilterAll ¶
type HolderFilterAll struct{}
HolderFilterAll is a placeholder type which always returns true for the check functions. You can embed it to make a HolderOperator which processes everything.
func (HolderFilterAll) CheckField ¶
func (HolderFilterAll) CheckField(string, string) (bool, bool)
func (HolderFilterAll) CheckFragment ¶
func (HolderFilterAll) CheckIndex ¶
func (HolderFilterAll) CheckIndex(string) (bool, bool)
type HolderFilterParams ¶
type HolderInfo ¶
type HolderInfo struct { FragmentInfo map[string]FragmentInfo FragmentNames []string }
type HolderOperator ¶
type HolderOperator interface { HolderFilter HolderProcess }
HolderOperator is both a filter and a process. This is the general form of "I want to do something to some part of a holder."
type HolderOpts ¶
type HolderOpts struct {
	// ReadOnly indicates that this holder's contents should not produce
	// disk writes under any circumstances. It must be set before Open
	// is called, and changing it is not supported.
	ReadOnly bool

	// If Inspect is set, we'll try to obtain additional information
	// about fragments when opening them.
	Inspect bool

	// Txsrc controls the tx/storage engine we instantiate. Set by
	// server.go OptServerTxsrc
	Txsrc string

	// RowcacheOn, if true, turns on the row cache for all storage backends.
	RowcacheOn bool
}
HolderOpts holds information about the holder which other things might want to look up later while using the holder.
type HolderProcess ¶
type HolderProcess interface { ProcessIndex(*Index) error ProcessField(*Field) error ProcessView(*view) error ProcessFragment(*fragment) error }
HolderProcess represents something that has operations which can be performed on indexes, fields, views, and/or fragments.
type HolderProcessNone ¶
type HolderProcessNone struct{}
HolderProcessNone is a placeholder type which does nothing for the process functions. You can embed it to make a HolderOperator which does nothing, or embed it and provide your own ProcessFragment to do just that.
func (HolderProcessNone) ProcessField ¶
func (HolderProcessNone) ProcessField(*Field) error
func (HolderProcessNone) ProcessFragment ¶
func (HolderProcessNone) ProcessFragment(*fragment) error
func (HolderProcessNone) ProcessIndex ¶
func (HolderProcessNone) ProcessIndex(*Index) error
func (HolderProcessNone) ProcessView ¶
func (HolderProcessNone) ProcessView(*view) error
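Following the embedding pattern described for HolderFilterAll and HolderProcessNone, here is a hedged sketch of a HolderOperator that visits and counts every field (the counter type is hypothetical):

type fieldCounter struct {
	HolderFilterAll   // process and recurse everything
	HolderProcessNone // no-op processing, except where overridden below
	n int
}

// ProcessField overrides the no-op promoted from HolderProcessNone.
func (fc *fieldCounter) ProcessField(f *Field) error {
	fc.n++
	return nil
}

It would then be run with Holder.Process, e.g. err := holder.Process(ctx, &fieldCounter{}).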
type IDAllocCommitRequest ¶ added in v2.5.0
type IDAllocCommitRequest struct { Key IDAllocKey `json:"key"` Session [32]byte `json:"session"` Count uint64 `json:"count"` }
type IDAllocKey ¶ added in v2.5.0
IDAllocKey is an ID allocation key.
func (IDAllocKey) String ¶ added in v2.5.0
func (k IDAllocKey) String() string
type IDAllocReserveRequest ¶ added in v2.5.0
type IDAllocReserveRequest struct { Key IDAllocKey `json:"key"` Session [32]byte `json:"session"` Offset uint64 `json:"offset"` Count uint64 `json:"count"` }
type ImportColumnAttrsRequest ¶
type ImportColumnAttrsRequest struct { AttrKey string ColumnIDs []uint64 AttrVals []string Shard int64 Index string IndexCreatedAt int64 }
ImportColumnAttrsRequest describes the import request structure for a ColumnAttr import.
type ImportOption ¶
type ImportOption func(*ImportOptions) error
ImportOption is a functional option type for API.Import.
func OptImportOptionsClear ¶
func OptImportOptionsClear(c bool) ImportOption
OptImportOptionsClear is a functional option on ImportOption used to specify whether the import is a set or clear operation.
func OptImportOptionsIgnoreKeyCheck ¶
func OptImportOptionsIgnoreKeyCheck(b bool) ImportOption
OptImportOptionsIgnoreKeyCheck is a functional option on ImportOption used to specify whether key check should be ignored.
func OptImportOptionsPresorted ¶
func OptImportOptionsPresorted(b bool) ImportOption
type ImportOptions ¶
type ImportOptions struct {
	Clear          bool
	IgnoreKeyCheck bool
	Presorted      bool

	// test Tx atomicity if > 0
	SimPowerLossAfter int
}
ImportOptions holds the options for the API.Import method.
TODO(2.0) we have entirely missed the point of functional options by exporting this structure. If it needs to be exported for some reason, we should consider not using functional options here which just adds complexity.
type ImportRequest ¶
type ImportRequest struct {
	Index          string
	IndexCreatedAt int64
	Field          string
	FieldCreatedAt int64
	Shard          uint64
	RowIDs         []uint64
	ColumnIDs      []uint64
	RowKeys        []string
	ColumnKeys     []string
	Timestamps     []int64
	Clear          bool
}
ImportRequest describes the import request structure for an import. BSIs use the ImportValueRequest instead.
func (*ImportRequest) ValidateWithTimestamp ¶
func (ir *ImportRequest) ValidateWithTimestamp(indexCreatedAt, fieldCreatedAt int64) error
ValidateWithTimestamp ensures that the payload of the request is valid.
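A hedged sketch of a minimal bit import built from an ImportRequest (the index, field, and IDs are hypothetical; per the Import note above, a nil qcx means the import is committed before the call returns):

req := &ImportRequest{
	Index:     "myindex",
	Field:     "myfield",
	Shard:     0,
	RowIDs:    []uint64{1, 1, 2},
	ColumnIDs: []uint64{10, 11, 12},
}
if err := api.Import(ctx, nil, req); err != nil {
	return err
}
// To clear the same bits instead, pass OptImportOptionsClear(true) as an option.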
type ImportResponse ¶
type ImportResponse struct {
Err string
}
ImportResponse is the structured response of an import.
type ImportRoaringRequest ¶
type ImportRoaringRequest struct {
	IndexCreatedAt  int64
	FieldCreatedAt  int64
	Clear           bool
	Action          string // [set, clear, overwrite]
	Block           int
	Views           map[string][]byte
	UpdateExistence bool
}
ImportRoaringRequest describes the import request structure for an import containing roaring-encoded data.
func (*ImportRoaringRequest) ValidateWithTimestamp ¶
func (irr *ImportRoaringRequest) ValidateWithTimestamp(indexCreatedAt, fieldCreatedAt int64) error
ValidateWithTimestamp ensures that the payload of the request is valid.
type ImportValueRequest ¶
type ImportValueRequest struct {
	Index          string
	IndexCreatedAt int64
	Field          string
	FieldCreatedAt int64

	// if Shard is MaxUint64 (an impossible shard value), this
	// indicates that the column IDs may come from multiple shards.
	Shard uint64

	ColumnIDs  []uint64 // e.g. weather stationID
	ColumnKeys []string
	Values     []int64 // e.g. temperature, humidity, barometric pressure

	FloatValues  []float64
	StringValues []string
	Clear        bool
}
ImportValueRequest describes the import request structure for a value (BSI) import. Note: there are no RowIDs here; the BSI Values have to be converted into RowIDs internally.
func (*ImportValueRequest) Len ¶
func (ivr *ImportValueRequest) Len() int
func (*ImportValueRequest) Less ¶
func (ivr *ImportValueRequest) Less(i, j int) bool
func (*ImportValueRequest) Swap ¶
func (ivr *ImportValueRequest) Swap(i, j int)
func (*ImportValueRequest) Validate ¶
func (ivr *ImportValueRequest) Validate() error
Validate ensures that the payload of the request is valid.
func (*ImportValueRequest) ValidateWithTimestamp ¶
func (ivr *ImportValueRequest) ValidateWithTimestamp(indexCreatedAt, fieldCreatedAt int64) error
ValidateWithTimestamp ensures that the payload of the request is valid.
type InMemTransactionStore ¶
type InMemTransactionStore struct {
// contains filtered or unexported fields
}
InMemTransactionStore does not persist transaction data and is only useful for testing.
func NewInMemTransactionStore ¶
func NewInMemTransactionStore() *InMemTransactionStore
func (*InMemTransactionStore) Get ¶
func (s *InMemTransactionStore) Get(id string) (*Transaction, error)
func (*InMemTransactionStore) List ¶
func (s *InMemTransactionStore) List() (map[string]*Transaction, error)
func (*InMemTransactionStore) Put ¶
func (s *InMemTransactionStore) Put(trns *Transaction) error
func (*InMemTransactionStore) Remove ¶
func (s *InMemTransactionStore) Remove(id string) (*Transaction, error)
type InMemTranslateStore ¶
type InMemTranslateStore struct {
// contains filtered or unexported fields
}
InMemTranslateStore is an in-memory storage engine for mapping keys to int values.
func NewInMemTranslateStore ¶
func NewInMemTranslateStore(index, field string, partitionID, partitionN int) *InMemTranslateStore
NewInMemTranslateStore returns a new instance of InMemTranslateStore.
func (*InMemTranslateStore) Close ¶
func (s *InMemTranslateStore) Close() error
func (*InMemTranslateStore) ComputeTranslatorSummaryCols ¶ added in v2.3.0
func (s *InMemTranslateStore) ComputeTranslatorSummaryCols(partitionID int, topo *Topology) (sum *TranslatorSummary, err error)
func (*InMemTranslateStore) ComputeTranslatorSummaryRows ¶ added in v2.3.0
func (s *InMemTranslateStore) ComputeTranslatorSummaryRows() (sum *TranslatorSummary, err error)
func (*InMemTranslateStore) CreateKeys ¶ added in v2.1.8
func (s *InMemTranslateStore) CreateKeys(keys ...string) (map[string]uint64, error)
CreateKeys maps all keys to IDs, creating the IDs if they do not exist. If the translator is read-only, this will return an error.
func (*InMemTranslateStore) EntryReader ¶
func (s *InMemTranslateStore) EntryReader(ctx context.Context, offset uint64) (TranslateEntryReader, error)
EntryReader returns an error. Replication is not supported.
func (*InMemTranslateStore) FindKeys ¶ added in v2.1.8
func (s *InMemTranslateStore) FindKeys(keys ...string) (map[string]uint64, error)
FindKeys looks up the ID for each key. Keys are not created if they do not exist. Missing keys are not considered errors, so the length of the result may be less than that of the input.
func (*InMemTranslateStore) ForceSet ¶
func (s *InMemTranslateStore) ForceSet(id uint64, key string) error
ForceSet writes the id/key pair to the db. Used by replication.
func (*InMemTranslateStore) GetStorePath ¶ added in v2.3.0
func (s *InMemTranslateStore) GetStorePath() string
func (*InMemTranslateStore) IDWalker ¶ added in v2.3.0
func (s *InMemTranslateStore) IDWalker(walk func(key string, col uint64)) error
IDWalker executes walk for every pair in the database.
func (*InMemTranslateStore) KeyWalker ¶ added in v2.3.0
func (s *InMemTranslateStore) KeyWalker(walk func(key string, col uint64)) error
KeyWalker executes walk for every pair in the database.
func (*InMemTranslateStore) MaxID ¶
func (s *InMemTranslateStore) MaxID() (uint64, error)
MaxID returns the highest identifier in the store.
func (*InMemTranslateStore) PartitionID ¶
func (s *InMemTranslateStore) PartitionID() int
PartitionID returns the partition id the store was initialized with.
func (*InMemTranslateStore) ReadFrom ¶
func (s *InMemTranslateStore) ReadFrom(r io.Reader) (count int64, err error)
ReadFrom implements io.ReaderFrom. It is neither efficient nor careful, but we don't expect to use InMemTranslateStore much; it exists mostly to avoid disk load during testing.
func (*InMemTranslateStore) ReadOnly ¶
func (s *InMemTranslateStore) ReadOnly() bool
ReadOnly returns true if the store is in read-only mode.
func (*InMemTranslateStore) RepairKeys ¶ added in v2.3.0
func (s *InMemTranslateStore) RepairKeys(topo *Topology, verbose, applyKeyRepairs bool) (changed bool, err error)
func (*InMemTranslateStore) SetReadOnly ¶
func (s *InMemTranslateStore) SetReadOnly(v bool)
SetReadOnly toggles the read-only mode of the store.
func (*InMemTranslateStore) TranslateID ¶
func (s *InMemTranslateStore) TranslateID(id uint64) (string, error)
TranslateID converts an integer ID to a string key. Returns a blank string if ID does not exist.
func (*InMemTranslateStore) TranslateIDs ¶
func (s *InMemTranslateStore) TranslateIDs(ids []uint64) ([]string, error)
TranslateIDs converts a list of integer IDs to a list of string keys.
func (*InMemTranslateStore) TranslateKey ¶
func (s *InMemTranslateStore) TranslateKey(key string, writable bool) (uint64, error)
TranslateKey converts a string key to an integer ID. If key does not have an associated id then one is created.
func (*InMemTranslateStore) TranslateKeys ¶
func (s *InMemTranslateStore) TranslateKeys(keys []string, writable bool) (_ []uint64, err error)
TranslateKeys converts a list of string keys to integer IDs. If a key does not have an associated ID, one is created.
func (*InMemTranslateStore) WriteNotify ¶
func (s *InMemTranslateStore) WriteNotify() <-chan struct{}
WriteNotify returns a channel that is closed when a new entry is written.
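A minimal sketch of the key-translation round trip on an in-memory store; the index, field, key names, and partition arguments are illustrative, and errors are ignored for brevity:

    func inMemTranslateExample() {
        s := NewInMemTranslateStore("i", "f", 0, 256) // partition 0 of 256 (example values)

        // CreateKeys assigns IDs to keys that don't exist yet.
        created, _ := s.CreateKeys("alpha", "beta")

        // FindKeys never creates; missing keys are simply absent from the result.
        found, _ := s.FindKeys("alpha", "gamma") // len(found) == 1

        // TranslateID maps an ID back to its key ("" if the ID is unknown).
        key, _ := s.TranslateID(created["alpha"])
        _, _ = found, key
    }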
type Index ¶
type Index struct { Stats stats.StatsClient // Instantiates new translation stores OpenTranslateStore OpenTranslateStoreFunc // contains filtered or unexported fields }
Index represents a container for fields.
func NewIndex ¶
NewIndex returns an existing (but possibly empty) instance of Index at path. It will not erase any prior content.
func (*Index) AvailableShards ¶
AvailableShards returns a bitmap of all shards with data in the index.
func (*Index) ColumnAttrStore ¶
ColumnAttrStore returns the storage for column attributes.
func (*Index) ComputeTranslatorSummary ¶ added in v2.2.0
func (idx *Index) ComputeTranslatorSummary(verbose, checkKeys, applyKeyRepairs bool, topo *Topology, nodeID string, parallelReaders int) (ats *AllTranslatorSummary, err error)
The returned summaries are guaranteed to be sorted by (index, PartitionID, field) only if the returned err is nil.
func (*Index) CreateField ¶
func (i *Index) CreateField(name string, opts ...FieldOption) (*Field, error)
CreateField creates a field.
func (*Index) CreateFieldIfNotExists ¶
func (i *Index) CreateFieldIfNotExists(name string, opts ...FieldOption) (*Field, error)
CreateFieldIfNotExists creates a field with the given options if it doesn't exist.
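A minimal sketch of field creation on an existing *Index; the field name is illustrative and no FieldOption values are passed, so defaults apply:

    // createOrGetField returns the named field, creating it with default
    // options if it does not already exist.
    func createOrGetField(idx *Index) (*Field, error) {
        return idx.CreateFieldIfNotExists("segments")
    }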
func (*Index) DeleteField ¶
DeleteField removes a field from the index.
func (*Index) Dump ¶ added in v2.2.0
Dump prints to stdout the contents of the roaring Containers stored in idx. Mostly for debugging.
func (*Index) NeedsSnapshot ¶ added in v2.2.0
func (*Index) OpenWithTimestamp ¶
OpenWithTimestamp opens and initializes the index and set a new CreatedAt timestamp for fields.
func (*Index) Options ¶
func (i *Index) Options() IndexOptions
Options returns all options for this index.
func (*Index) QualifiedName ¶ added in v2.2.0
QualifiedName returns the qualified name of the index.
func (*Index) SliceOfShards ¶ added in v2.2.0
func (*Index) StringifiedRoaringKeys ¶ added in v2.2.0
hashOnly means show only the value hash, not the content bits; showOps means display the ops log.
func (*Index) TranslateStore ¶
func (i *Index) TranslateStore(partitionID int) TranslateStore
TranslateStore returns the translation store for a given partition.
func (*Index) TranslateStorePath ¶
TranslateStorePath returns the translation database path for a partition.
func (*Index) WriteFragmentChecksums ¶ added in v2.2.0
func (idx *Index) WriteFragmentChecksums(w io.Writer, showBits, showOps bool, topo *Topology, verbose bool) (sum *IndexFragmentSummary)
If verbose is true, then print to w.
type IndexFragmentSummary ¶ added in v2.3.0
type IndexFragmentSummary struct { Dir string NodeID string Index string IndexPath string Frg []*FragSum RelPath2fsum map[string]*FragSum }
IndexFragmentSummary is returned by WriteFragmentChecksums.
func (*IndexFragmentSummary) String ¶ added in v2.3.0
func (ifs *IndexFragmentSummary) String() (s string)
type IndexInfo ¶
type IndexInfo struct { Name string `json:"name"` CreatedAt int64 `json:"createdAt,omitempty"` Options IndexOptions `json:"options"` Fields []*FieldInfo `json:"fields"` ShardWidth uint64 `json:"shardWidth"` }
IndexInfo represents schema information for an index.
type IndexOptions ¶
IndexOptions represents options to set when initializing an index.
type IndexStatus ¶
type IndexStatus struct { Name string CreatedAt int64 Fields []*FieldStatus }
IndexStatus is an internal message representing the contents of an index.
type IndexTranslateOffsetMap ¶
type IndexTranslateOffsetMap struct { Partitions map[int]uint64 `json:"partitions"` Fields map[string]uint64 `json:"fields"` }
func NewIndexTranslateOffsetMap ¶
func NewIndexTranslateOffsetMap() *IndexTranslateOffsetMap
type IndexUsage ¶ added in v2.8.0
type IndexUsage struct { Total uint64 `json:"total"` IndexKeys uint64 `json:"indexKeys"` FieldKeysTotal uint64 `json:"fieldKeysTotal"` Fragments uint64 `json:"fragments"` Metadata uint64 `json:"metadata"` Fields map[string]FieldUsage `json:"fields"` }
IndexUsage represents the storage space used on disk by one index, on one node.
type InspectRequest ¶
type InspectRequest struct { HolderFilterParams InspectRequestParams }
InspectRequest represents a request for a possibly-partial holder inspection, using a provided holder filter and inspect-specific parameters.
type InspectRequestParams ¶
type InspectRequestParams struct { Containers bool // include container details Checksum bool // perform checksums }
InspectRequestParams represents the parts of an InspectRequest that aren't generic holder filtering attributes.
type InspectResponse ¶
type InspectResponse struct { Fragments []struct { Index string Field string View string Shard int64 Path string Info *FragmentInfo } }
InspectResponse contains the structured results for an InspectRequest. It may some day be expanded to include metadata about views or indexes.
type InternalClient ¶
type InternalClient interface { InternalQueryClient MaxShardByIndex(ctx context.Context) (map[string]uint64, error) Schema(ctx context.Context) ([]*IndexInfo, error) PostSchema(ctx context.Context, uri *URI, s *Schema, remote bool) error CreateIndex(ctx context.Context, index string, opt IndexOptions) error FragmentNodes(ctx context.Context, index string, shard uint64) ([]*Node, error) Nodes(ctx context.Context) ([]*Node, error) Query(ctx context.Context, index string, queryRequest *QueryRequest) (*QueryResponse, error) Import(ctx context.Context, index, field string, shard uint64, bits []Bit, opts ...ImportOption) error ImportK(ctx context.Context, index, field string, bits []Bit, opts ...ImportOption) error EnsureIndex(ctx context.Context, name string, options IndexOptions) error EnsureField(ctx context.Context, indexName string, fieldName string) error EnsureFieldWithOptions(ctx context.Context, index, field string, opt FieldOptions) error ImportValue(ctx context.Context, index, field string, shard uint64, vals []FieldValue, opts ...ImportOption) error ImportValueK(ctx context.Context, index, field string, vals []FieldValue, opts ...ImportOption) error ImportValue2(ctx context.Context, req *ImportValueRequest, options *ImportOptions) error ExportCSV(ctx context.Context, index, field string, shard uint64, w io.Writer) error CreateField(ctx context.Context, index, field string) error CreateFieldWithOptions(ctx context.Context, index, field string, opt FieldOptions) error FragmentBlocks(ctx context.Context, uri *URI, index, field, view string, shard uint64) ([]FragmentBlock, error) BlockData(ctx context.Context, uri *URI, index, field, view string, shard uint64, block int) ([]uint64, []uint64, error) ColumnAttrDiff(ctx context.Context, uri *URI, index string, blks []AttrBlock) (map[uint64]map[string]interface{}, error) RowAttrDiff(ctx context.Context, uri *URI, index, field string, blks []AttrBlock) (map[uint64]map[string]interface{}, error) SendMessage(ctx context.Context, uri *URI, msg []byte) error RetrieveShardFromURI(ctx context.Context, index, field, view string, shard uint64, uri URI) (io.ReadCloser, error) RetrieveTranslatePartitionFromURI(ctx context.Context, index string, partition int, uri URI) (io.ReadCloser, error) ImportRoaring(ctx context.Context, uri *URI, index, field string, shard uint64, remote bool, req *ImportRoaringRequest) error ImportColumnAttrs(ctx context.Context, uri *URI, index string, req *ImportColumnAttrsRequest) error StartTransaction(ctx context.Context, id string, timeout time.Duration, exclusive bool) (*Transaction, error) FinishTransaction(ctx context.Context, id string) (*Transaction, error) Transactions(ctx context.Context) (map[string]*Transaction, error) GetTransaction(ctx context.Context, id string) (*Transaction, error) GetNodeUsage(ctx context.Context, uri *URI) (map[string]NodeUsage, error) GetPastQueries(ctx context.Context, uri *URI) ([]PastQueryStatus, error) }
InternalClient should be implemented by any struct that enables any transport between nodes. TODO: Refactor. Note from Travis: typically, an interface containing more than two or three methods is an indication that something hasn't been architected correctly. While I understand that putting the entire Client behind an interface might require this many methods, I don't want to let it go unquestioned. Another note from Travis: I think we eventually want to unify `InternalClient` with the `go-pilosa` client. Doing that may obviate the need to refactor this.
type InternalQueryClient ¶
type InternalQueryClient interface { QueryNode(ctx context.Context, uri *URI, index string, queryRequest *QueryRequest) (*QueryResponse, error) // Translate keys on the particular node. The parameter writable informs the TranslateStore whether a new ID may be generated if any of the keys does not exist. TranslateKeysNode(ctx context.Context, uri *URI, index, field string, keys []string, writable bool) ([]uint64, error) TranslateIDsNode(ctx context.Context, uri *URI, index, field string, id []uint64) ([]string, error) FindIndexKeysNode(ctx context.Context, uri *URI, index string, keys ...string) (map[string]uint64, error) FindFieldKeysNode(ctx context.Context, uri *URI, index string, field string, keys ...string) (map[string]uint64, error) CreateIndexKeysNode(ctx context.Context, uri *URI, index string, keys ...string) (map[string]uint64, error) CreateFieldKeysNode(ctx context.Context, uri *URI, index string, field string, keys ...string) (map[string]uint64, error) }
InternalQueryClient is the internal interface for querying a node.
type Jmphasher ¶ added in v2.3.0
type Jmphasher struct{}
Jmphasher represents an implementation of jmphash. Implements Hasher.
type KeyOrID ¶ added in v2.2.0
func (KeyOrID) MarshalJSON ¶ added in v2.2.0
type LineSorter ¶ added in v2.2.0
type MemoryUsage ¶ added in v2.8.1
MemoryUsage represents the memory used by one node.
type Message ¶
type Message interface{}
Message is the interface implemented by all core pilosa types which can be serialized to messages. TODO add at least a single "isMessage()" method.
type MessageProcessingError ¶
type MessageProcessingError struct {
Err error
}
MessageProcessingError is an error indicating that a cluster message could not be processed.
func (MessageProcessingError) Cause ¶
func (err MessageProcessingError) Cause() error
Cause allows the error to be unwrapped.
func (MessageProcessingError) Error ¶
func (err MessageProcessingError) Error() string
func (MessageProcessingError) Unwrap ¶
func (err MessageProcessingError) Unwrap() error
Unwrap allows the error to be unwrapped.
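Because Unwrap is implemented, the standard errors helpers can reach the underlying cause. A minimal sketch, assuming an illustrative sentinel error errSomeCause:

    import "errors"

    var errSomeCause = errors.New("some cause") // illustrative sentinel

    // isSomeCause reports whether the wrapped cause is errSomeCause;
    // errors.Is walks the Unwrap chain to find it.
    func isSomeCause(err MessageProcessingError) bool {
        return errors.Is(err, errSomeCause)
    }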
type MultiReaderB ¶ added in v2.2.0
type MultiReaderB struct {
// contains filtered or unexported fields
}
MultiReaderB is returned by RoaringBitmapReader. It verifies that identical byte streams are read from its two members.
func (*MultiReaderB) Close ¶ added in v2.2.0
func (m *MultiReaderB) Close() error
type MultiTranslateEntryReader ¶
type MultiTranslateEntryReader struct {
// contains filtered or unexported fields
}
MultiTranslateEntryReader reads from multiple TranslateEntryReader instances and merges them into a single reader.
func NewMultiTranslateEntryReader ¶
func NewMultiTranslateEntryReader(ctx context.Context, readers []TranslateEntryReader) *MultiTranslateEntryReader
NewMultiTranslateEntryReader returns a new instance of MultiTranslateEntryReader.
func (*MultiTranslateEntryReader) Close ¶
func (r *MultiTranslateEntryReader) Close() error
Close stops the reader & child readers and waits for all goroutines to stop.
func (*MultiTranslateEntryReader) ReadEntry ¶
func (r *MultiTranslateEntryReader) ReadEntry(entry *TranslateEntry) error
ReadEntry reads the next available entry into entry. Returns an error if any of the child readers error. Returns io.EOF if reader is closed.
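A minimal sketch of draining a merged reader until it is closed; any error other than io.EOF is returned as-is:

    import (
        "context"
        "io"
    )

    func drainEntries(ctx context.Context, readers []TranslateEntryReader) ([]TranslateEntry, error) {
        r := NewMultiTranslateEntryReader(ctx, readers)
        defer r.Close()

        var entries []TranslateEntry
        for {
            var entry TranslateEntry
            if err := r.ReadEntry(&entry); err != nil {
                if err == io.EOF { // reader was closed
                    return entries, nil
                }
                return entries, err
            }
            entries = append(entries, entry)
        }
    }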
type Node ¶
type Node struct { ID string `json:"id"` URI URI `json:"uri"` GRPCURI URI `json:"grpc-uri"` IsCoordinator bool `json:"isCoordinator"` State string `json:"state"` }
Node represents a node in the cluster.
type NodeEvent ¶
type NodeEvent struct { Event NodeEventType Node *Node }
NodeEvent is a single event related to node activity in the cluster.
type NodeEventType ¶
type NodeEventType int
NodeEventType are the types of node events.
const ( NodeJoin NodeEventType = iota NodeLeave NodeUpdate )
Constant node event types.
type NodeStateMessage ¶
type NodeStateMessage struct { NodeID string `protobuf:"bytes,1,opt,name=NodeID,proto3" json:"NodeID,omitempty"` State string `protobuf:"bytes,2,opt,name=State,proto3" json:"State,omitempty"` }
NodeStateMessage is an internal message for broadcasting a node's state.
type NodeStatus ¶
type NodeStatus struct { Node *Node Indexes []*IndexStatus Schema *Schema }
NodeStatus is an internal message representing the contents of a node.
type NodeUsage ¶ added in v2.3.0
type NodeUsage struct { Disk DiskUsage `json:"diskUsage"` Memory MemoryUsage `json:"memoryUsage"` }
NodeUsage represents all usage measurements for one node.
type Nodes ¶
type Nodes []*Node
Nodes represents a list of nodes.
func (Nodes) ContainsID ¶
ContainsID returns true if the given id matches one of the nodes' IDs.
type NopGeneration ¶ added in v2.2.0
type NopGeneration struct { }
NopGeneration is used in fragment.openStorage() to short-circuit generation stuff that only applies to RoaringTx; doesn't apply to RBFTx/BadgerTx/etc.
func (*NopGeneration) Bytes ¶ added in v2.2.0
func (g *NopGeneration) Bytes() (ret []byte)
func (*NopGeneration) Dead ¶ added in v2.2.0
func (g *NopGeneration) Dead() bool
func (*NopGeneration) Done ¶ added in v2.2.0
func (g *NopGeneration) Done()
func (*NopGeneration) Generation ¶ added in v2.2.0
func (g *NopGeneration) Generation() int64
func (*NopGeneration) ID ¶ added in v2.2.0
func (g *NopGeneration) ID() string
func (*NopGeneration) Transaction ¶ added in v2.2.0
func (g *NopGeneration) Transaction(w *io.Writer, f func() error) error
type NotFoundError ¶
type NotFoundError error
NotFoundError wraps an error value to signify that a resource was not found, such that in an HTTP scenario http.StatusNotFound would be returned.
type OpenIDAllocatorFunc ¶ added in v2.5.0
type OpenTransactionStoreFunc ¶
type OpenTransactionStoreFunc func(path string) (TransactionStore, error)
type OpenTranslateReaderFunc ¶
type OpenTranslateReaderFunc func(ctx context.Context, nodeURL string, offsets TranslateOffsetMap) (TranslateEntryReader, error)
OpenTranslateReaderFunc represents a function for instantiating and opening a TranslateEntryReader.
type OpenTranslateStoreFunc ¶
type OpenTranslateStoreFunc func(path, index, field string, partitionID, partitionN int) (TranslateStore, error)
OpenTranslateStoreFunc represents a function for instantiating and opening a TranslateStore.
type PairField ¶
PairField is a Pair with its associated field.
func (PairField) MarshalJSON ¶
MarshalJSON marshals PairField into a JSON-encoded byte slice, excluding `Field`.
type Pairs ¶
type Pairs []Pair
Pairs is a sortable slice of Pair objects.
func (*Pairs) Pop ¶
func (p *Pairs) Pop() interface{}
Pop removes the minimum element from the Pair slice.
type PairsField ¶
PairsField is a Pairs object with its associated field.
func (*PairsField) Clone ¶ added in v2.2.0
func (p *PairsField) Clone() (r *PairsField)
func (PairsField) MarshalJSON ¶
func (p PairsField) MarshalJSON() ([]byte, error)
MarshalJSON marshals PairsField into a JSON-encoded byte slice, excluding `Field`.
func (*PairsField) ToRows ¶
func (p *PairsField) ToRows(callback func(*pb.RowResponse) error) error
ToRows implements the ToRowser interface.
func (*PairsField) ToTable ¶
func (p *PairsField) ToTable() (*pb.TableResponse, error)
ToTable implements the ToTabler interface.
type PastQueryStatus ¶ added in v2.4.0
type PreconditionFailedError ¶
type PreconditionFailedError struct {
// contains filtered or unexported fields
}
type Qcx ¶ added in v2.2.0
type Qcx struct { Grp *TxGroup Txf *TxFactory // RequiredForAtomicWriteTx is used by api.ImportAtomicRecord // to ensure that all writes happen on this one Tx. RequiredForAtomicWriteTx *Tx // efficient access to the options for RequiredForAtomicWriteTx RequiredTxo *Txo // contains filtered or unexported fields }
Qcx is a (Pilosa) Query Context.
It flexibly expresses the desired grouping of Tx for mass rollback at a query's end. It provides one-time commit for an atomic import write Tx that involves multiple fragments.
The most common use of Qcx is to call GetTx() to obtain a Tx locally, once the index/shard pair is known:
    someFunc(qcx Qcx, idx *Index, shard uint64) (err0 error) {
        tx, finisher := qcx.GetTx(Txo{Write: true, Index: idx, Shard: shard, ...})
        defer finisher(&err0)
        ...
    }
Qcx reuses read-only Tx on the same index/shard pair. See the Qcx.GetTx() for further discussion. The caveat is of course that your "new" read Tx actually has an "old" view of the database.
At the moment, most writes to individual shards are committed eagerly and locally when the `defer finisher(&err0)` is run. This is done by returning a finisher that actually Commits, thus freeing the one write slot for re-use. A single writer is also required by RBF, so this design accommodates both.
In contrast, the default read Tx generated (or re-used) will return a no-op finisher, and the group of reads as a whole will be rolled back (mmap memory released) en masse when Qcx.Abort() is called at the top-most level.
Local use of a (Tx, finisher) pair obtained from Qcx.GetTx() doesn't need to care about these details. Local use should always invoke finisher(&err0) or finisher(nil) to complete the Tx within the local function scope.
In summary write Tx are typically "local" and are never saved into the TxGroup. The parallelism supplied by TxGroup typically applies only to read Tx.
The one exception to this rule is the single write Tx used during the api.ImportAtomicRecord routine. There we make a special write Tx and use it for all matching writes. This is then committed at the final, top-level Qcx.Finish() call.
See also the Qcx.GetTx() example and the TxGroup description below.
func (*Qcx) Abort ¶ added in v2.2.0
func (q *Qcx) Abort()
Abort rolls back all Tx generated and stored within the Qcx. The Qcx is then reset and can be used again immediately.
func (*Qcx) ClearRequiredForAtomicWriteTx ¶ added in v2.2.0
func (qcx *Qcx) ClearRequiredForAtomicWriteTx()
func (*Qcx) Finish ¶ added in v2.2.0
Finish commits or rolls back all stored Tx. It no longer resets the Qcx for further operations automatically; the user must call Reset() or NewQcx() again.
func (*Qcx) GetTx ¶ added in v2.2.0
GetTx is used like this:
    someFunc(ctx context.Context, shard uint64) (_ interface{}, err0 error) {
        tx, finisher := qcx.GetTx(Txo{Write: !writable, Index: idx, Shard: shard})
        defer finisher(&err0)
        return e.executeIncludesColumnCallShard(ctx, tx, index, c, shard, col)
    }
Note that we are tracking the returned err0 error value of someFunc(). An alternative is to write
defer finisher(nil)
This means writes are always committed, regardless of errors. This style is expected to be rare compared to the typical
defer finisher(&err0)
invocation, where err0 is the error returned from the enclosing function. If the Tx is local and not part of a group, then the finisher consults that error to decide whether to Commit() or Rollback().
If instead the Tx becomes part of a group, then the local finisher() is always a no-op, in deference to the Qcx.Finish() or Qcx.Abort() calls.
Take care that finisher(&err) captures the address of the enclosing function's err and that it has not been shadowed locally by another _, err := f() call. For this reason, it can be clearer (and much safer) to rename the enclosing function's 'err' to 'err0', to make it clear that we are referring to the first and final error.
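A minimal sketch of that pattern; the named return err0 keeps the deferred finisher pointed at the enclosing function's final error even when inner declarations shadow err. doSomethingWith is a hypothetical stand-in for real work against the Tx:

    func doWriteWork(qcx *Qcx, idx *Index, shard uint64) (err0 error) {
        tx, finisher := qcx.GetTx(Txo{Write: true, Index: idx, Shard: shard})
        defer finisher(&err0) // consults err0 to decide Commit vs Rollback

        // An inner 'err := ...' here would shadow only a local variable,
        // not err0, which is what the finisher inspects.
        if err := doSomethingWith(tx); err != nil {
            return err // assigned to err0 on return, seen by the finisher
        }
        return nil
    }

    // doSomethingWith is a placeholder for fragment reads/writes using tx.
    func doSomethingWith(tx Tx) error { return nil }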
func (*Qcx) ListOpenTx ¶ added in v2.2.0
func (*Qcx) Reset ¶ added in v2.3.0
func (q *Qcx) Reset()
Reset forgets everything and starts fresh with an empty group, ready for use again as if NewQcx() had been called.
func (*Qcx) SetRequiredForAtomicWriteTx ¶ added in v2.2.0
func (*Qcx) StartAtomicWriteTx ¶ added in v2.2.0
StartAtomicWriteTx allocates a Tx and stores it in qcx.RequiredForAtomicWriteTx. All subsequent writes to this shard/index will re-use it.
type QueryRequest ¶
type QueryRequest struct { // Index to execute query against. Index string // The query string to parse and execute. Query string // The SQL source query, if applicable. SQLQuery string // The shards to include in the query execution. // If empty, all shards are included. Shards []uint64 // Return column attributes, if true. ColumnAttrs bool // Do not return row attributes, if true. ExcludeRowAttrs bool // Do not return columns, if true. ExcludeColumns bool // If true, indicates that query is part of a larger distributed query. // If false, this request is on the originating node. Remote bool // Query has already been translated. This is only used if Remote // is false, Remote=true implies this. PreTranslated bool // Should we profile this query? Profile bool // Additional data associated with the query, in cases where there's // row-style inputs for precomputed values. EmbeddedData []*Row }
QueryRequest represents a request to process a query.
type QueryResponse ¶
type QueryResponse struct { // Result for each top-level query call. // The result type differs depending on the query; types // include: Row, RowIdentifiers, GroupCounts, SignedRow, // ValCount, Pair, Pairs, bool, uint64. Results []interface{} // Set of column attribute objects matching IDs returned in Result. ColumnAttrSets []*ColumnAttrSet // Error during parsing or execution. Err error // Profiling data, if any Profile *tracing.Profile }
QueryResponse represents a response from a processed query.
func (*QueryResponse) MarshalJSON ¶
func (resp *QueryResponse) MarshalJSON() ([]byte, error)
MarshalJSON marshals QueryResponse into a JSON-encoded byte slice
type RBFTx ¶ added in v2.2.0
type RBFTx struct { Db *RbfDBWrapper // contains filtered or unexported fields }
func (*RBFTx) Add ¶ added in v2.2.0
func (tx *RBFTx) Add(index, field, view string, shard uint64, batched bool, a ...uint64) (changeCount int, err error)
Add sets all of the bits in a as hot in the specified fragment.
func (*RBFTx) ApplyFilter ¶ added in v2.7.0
func (*RBFTx) ContainerIterator ¶ added in v2.2.0
func (*RBFTx) CountRange ¶ added in v2.2.0
func (tx *RBFTx) CountRange(index, field, view string, shard uint64, start, end uint64) (n uint64, err error)
CountRange returns the count of hot bits in the start-end range on the fragment. roaring.countRange counts the number of bits set in [start, end).
func (*RBFTx) ForEachRange ¶ added in v2.2.0
func (*RBFTx) GetFieldSizeBytes ¶ added in v2.8.0
func (*RBFTx) GetSortedFieldViewList ¶ added in v2.7.0
func (*RBFTx) ImportRoaringBits ¶ added in v2.2.0
func (*RBFTx) IncrementOpN ¶ added in v2.2.0
func (*RBFTx) NewTxIterator ¶ added in v2.2.0
func (*RBFTx) OffsetRange ¶ added in v2.2.0
func (*RBFTx) PutContainer ¶ added in v2.2.0
func (*RBFTx) Readonly ¶ added in v2.2.0
Readonly returns true if the transaction is read-only rather than read-write.
func (*RBFTx) Remove ¶ added in v2.2.0
func (tx *RBFTx) Remove(index, field, view string, shard uint64, a ...uint64) (changeCount int, err error)
Remove clears all of the specified bits in a in the chosen fragment.
func (*RBFTx) RemoveContainer ¶ added in v2.2.0
func (*RBFTx) RoaringBitmap ¶ added in v2.2.0
func (*RBFTx) RoaringBitmapReader ¶ added in v2.2.0
func (*RBFTx) UnionInPlace ¶ added in v2.2.0
func (*RBFTx) UseRowCache ¶ added in v2.2.0
type RawRoaringData ¶ added in v2.2.0
type RawRoaringData struct {
// contains filtered or unexported fields
}
RawRoaringData is used by ImportRoaringBits. It must be consumable by roaring.newRoaringIterator().
func (*RawRoaringData) Iterator ¶ added in v2.2.0
func (rr *RawRoaringData) Iterator() (roaring.RoaringIterator, error)
type RbfDBWrapper ¶ added in v2.2.0
type RbfDBWrapper struct {
// contains filtered or unexported fields
}
RbfDBWrapper wraps an *rbf.DB
func (*RbfDBWrapper) CleanupTx ¶ added in v2.2.0
func (w *RbfDBWrapper) CleanupTx(tx Tx)
func (*RbfDBWrapper) Close ¶ added in v2.2.0
func (w *RbfDBWrapper) Close() error
func (*RbfDBWrapper) DeleteDBPath ¶ added in v2.2.0
func (w *RbfDBWrapper) DeleteDBPath(dbs *DBShard) error
func (*RbfDBWrapper) DeleteField ¶ added in v2.2.0
func (w *RbfDBWrapper) DeleteField(index, field, fieldPath string) error
func (*RbfDBWrapper) DeleteFragment ¶ added in v2.2.0
func (w *RbfDBWrapper) DeleteFragment(index, field, view string, shard uint64, frag interface{}) error
func (*RbfDBWrapper) DeleteIndex ¶ added in v2.2.0
func (w *RbfDBWrapper) DeleteIndex(indexName string) error
func (*RbfDBWrapper) HasData ¶ added in v2.2.0
func (w *RbfDBWrapper) HasData() (has bool, err error)
func (*RbfDBWrapper) OpenListString ¶ added in v2.2.0
func (w *RbfDBWrapper) OpenListString() (r string)
func (*RbfDBWrapper) OpenSnList ¶ added in v2.2.0
func (w *RbfDBWrapper) OpenSnList() (slc []int64)
func (*RbfDBWrapper) Path ¶ added in v2.2.0
func (w *RbfDBWrapper) Path() string
func (*RbfDBWrapper) SetHolder ¶ added in v2.2.0
func (w *RbfDBWrapper) SetHolder(h *Holder)
type RecalculateCaches ¶
type RecalculateCaches struct{}
RecalculateCaches is an internal message for recalculating all caches within a holder.
type ResizeInstruction ¶
type ResizeInstruction struct { JobID int64 Node *Node Coordinator *Node Sources []*ResizeSource TranslationSources []*TranslationResizeSource NodeStatus *NodeStatus ClusterStatus *ClusterStatus }
ResizeInstruction contains the instruction provided to a node during a cluster resize operation.
type ResizeInstructionComplete ¶
ResizeInstructionComplete is an internal message to the coordinator indicating that the resize instructions performed on a single node have completed.
type ResizeSource ¶
type ResizeSource struct { Node *Node `protobuf:"bytes,1,opt,name=Node" json:"Node,omitempty"` Index string `protobuf:"bytes,2,opt,name=Index,proto3" json:"Index,omitempty"` Field string `protobuf:"bytes,3,opt,name=Field,proto3" json:"Field,omitempty"` View string `protobuf:"bytes,4,opt,name=View,proto3" json:"View,omitempty"` Shard uint64 `protobuf:"varint,5,opt,name=Shard,proto3" json:"Shard,omitempty"` }
ResizeSource is the source of data for a node acting on a ResizeInstruction.
type RoaringTx ¶ added in v2.2.0
RoaringTx represents a fake transaction object for Roaring storage.
func (*RoaringTx) ApplyFilter ¶ added in v2.7.0
func (*RoaringTx) ContainerIterator ¶ added in v2.2.0
func (*RoaringTx) CountRange ¶ added in v2.2.0
func (*RoaringTx) ForEachRange ¶ added in v2.2.0
func (*RoaringTx) GetFieldSizeBytes ¶ added in v2.8.0
func (*RoaringTx) GetSortedFieldViewList ¶ added in v2.7.0
func (tx *RoaringTx) GetSortedFieldViewList(idx *Index, shard uint64) (fvs []txkey.FieldView, err error)
GetSortedFieldViewList is inefficient for roaring; instead use roaringGetFieldView2Shards() above.
func (*RoaringTx) ImportRoaringBits ¶ added in v2.2.0
func (tx *RoaringTx) ImportRoaringBits(index, field, view string, shard uint64, rit roaring.RoaringIterator, clear bool, log bool, rowSize uint64, data []byte) (changed int, rowSet map[uint64]int, err error)
ImportRoaringBits: the return values changed and rowSet will be inaccurate if the data []byte is supplied. This mimics the traditional roaring-per-file approach and should be faster.
func (*RoaringTx) IncrementOpN ¶ added in v2.2.0
func (*RoaringTx) NewTxIterator ¶ added in v2.2.0
NewTxIterator returns a *roaring.Iterator that MUST have Close() called on it BEFORE the transaction commits or rolls back.
func (*RoaringTx) OffsetRange ¶ added in v2.2.0
func (*RoaringTx) PutContainer ¶ added in v2.2.0
func (*RoaringTx) RemoveContainer ¶ added in v2.2.0
func (*RoaringTx) RoaringBitmap ¶ added in v2.2.0
func (*RoaringTx) RoaringBitmapReader ¶ added in v2.2.0
func (*RoaringTx) UnionInPlace ¶ added in v2.2.0
func (*RoaringTx) UseRowCache ¶ added in v2.2.0
type RoaringWrapper ¶ added in v2.2.0
type RoaringWrapper struct {
// contains filtered or unexported fields
}
RoaringWrapper provides the NewTx() method.
func (*RoaringWrapper) CleanupTx ¶ added in v2.2.0
func (w *RoaringWrapper) CleanupTx(tx Tx)
func (*RoaringWrapper) Close ¶ added in v2.2.0
func (w *RoaringWrapper) Close() (err error)
Close shuts down the Roaring database.
func (*RoaringWrapper) DeleteDBPath ¶ added in v2.2.0
func (w *RoaringWrapper) DeleteDBPath(dbs *DBShard) (err error)
func (*RoaringWrapper) DeleteField ¶ added in v2.2.0
func (w *RoaringWrapper) DeleteField(index, field, fieldPath string) error
func (*RoaringWrapper) DeleteFragment ¶ added in v2.2.0
func (w *RoaringWrapper) DeleteFragment(index, field, view string, shard uint64, frag interface{}) error
func (*RoaringWrapper) HasData ¶ added in v2.2.0
func (w *RoaringWrapper) HasData() (has bool, err error)
func (*RoaringWrapper) IsClosed ¶ added in v2.2.0
func (w *RoaringWrapper) IsClosed() (closed bool)
func (*RoaringWrapper) OpenListString ¶ added in v2.2.0
func (w *RoaringWrapper) OpenListString() (r string)
func (*RoaringWrapper) OpenSnList ¶ added in v2.2.0
func (w *RoaringWrapper) OpenSnList() (slc []int64)
func (*RoaringWrapper) Path ¶ added in v2.2.0
func (w *RoaringWrapper) Path() string
func (*RoaringWrapper) SetHolder ¶ added in v2.2.0
func (w *RoaringWrapper) SetHolder(h *Holder)
type Row ¶
type Row struct { // String keys translated to/from segment columns. Keys []string // Attributes associated with the row. Attrs map[string]interface{} // Index tells what index this row is from - needed for key translation. Index string // Field tells what field this row is from if it's a "vertical" // row. It may be the result of a Distinct query or Rows // query. Knowing the index and field, we can figure out how to // interpret the row data. Field string // NoSplit indicates that this row may not be split. // This is used for `Rows` calls in a GroupBy. NoSplit bool // contains filtered or unexported fields }
Row is a set of integers (the associated columns), and attributes which are arbitrary key/value pairs storing metadata about what the row represents.
func NewRowFromBitmap ¶
NewRowFromBitmap divides a bitmap into rows, which it now calls shards. This transposes; data that was in any shard for Row 0 is now considered shard 0, etcetera.
func NewRowFromRoaring ¶
NewRowFromRoaring parses a roaring data file as a row, dividing it into bitmaps and rowSegments based on shard width.
func (*Row) Difference ¶
Difference returns the diff of r and other.
func (*Row) MarshalJSON ¶
MarshalJSON returns a JSON-encoded byte slice of r.
func (*Row) Segments ¶
func (r *Row) Segments() []rowSegment
Segments returns a list of all segments in the row.
func (*Row) Shift ¶
Shift returns the bitwise shift of r by n bits. Currently only positive shift values are supported.
NOTE: the Shift method is currently unsupported and is considered to be incorrect. Please DO NOT use it. We are leaving it here in case someone internally wants to use it with the understanding that the results may be incorrect.
Why unsupported? For a full description, see: https://github.com/molecula/pilosa/issues/403. In short, the current implementation will shift a bit at the edge of a shard out of the shard and into a container which is assumed to be an invalid container for the shard. So for example, shifting the last bit of shard 0 (containers 0-15) will shift that bit out to container 16. While this "sort of" works, it breaks an assumption about containers, and might stop working in the future if that assumption is enforced.
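The shard-edge problem follows from a little container arithmetic; a sketch assuming 2^16 bits per container and 16 containers per shard, as described above:

    const (
        containerBits      = 1 << 16 // bits per roaring container
        containersPerShard = 16      // containers 0-15 belong to shard 0
        shardBits          = containerBits * containersPerShard
    )

    // containerOf returns the container key holding the given bit.
    func containerOf(bit uint64) uint64 { return bit / containerBits }

    // The last bit of shard 0 lives in container 15; shifting it by one
    // lands it in container 16, outside the containers shard 0 is assumed to own.
    var (
        lastBit = uint64(shardBits - 1) // containerOf(lastBit) == 15
        shifted = lastBit + 1           // containerOf(shifted) == 16
    )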
func (*Row) ToRows ¶
func (r *Row) ToRows(callback func(*pb.RowResponse) error) error
ToRows implements the ToRowser interface.
func (*Row) ToTable ¶
func (r *Row) ToTable() (*pb.TableResponse, error)
ToTable implements the ToTabler interface.
type RowIDs ¶
type RowIDs []uint64
RowIDs is a query return type for just uint64 row ids. It should only be used internally (since RowIdentifiers is the external return type), but it is exported because the proto package needs access to it.
type RowIdentifiers ¶
type RowIdentifiers struct { Rows []uint64 `json:"rows"` Keys []string `json:"keys,omitempty"` // contains filtered or unexported fields }
RowIdentifiers is a return type for a list of row ids or row keys. The names `Rows` and `Keys` are meant to follow the same convention as the Row query which returns `Columns` and `Keys`. TODO: Rename this to something better. Anything.
func (*RowIdentifiers) Clone ¶ added in v2.2.0
func (r *RowIdentifiers) Clone() (clone *RowIdentifiers)
func (*RowIdentifiers) Field ¶
func (r *RowIdentifiers) Field() string
Field returns the field name associated to the row.
func (RowIdentifiers) ToRows ¶
func (r RowIdentifiers) ToRows(callback func(*pb.RowResponse) error) error
ToRows implements the ToRowser interface.
func (RowIdentifiers) ToTable ¶
func (r RowIdentifiers) ToTable() (*pb.TableResponse, error)
ToTable implements the ToTabler interface.
type Schema ¶
type Schema struct {
Indexes []*IndexInfo `json:"indexes"`
}
Schema contains information about indexes and their configuration.
type Serializer ¶
Serializer is an interface for serializing pilosa types to bytes and back.
type Server ¶
type Server struct {
// contains filtered or unexported fields
}
Server represents a holder wrapped by a running HTTP server.
func NewServer ¶
func NewServer(opts ...ServerOption) (*Server, error)
NewServer returns a new instance of Server.
func (*Server) FinishTransaction ¶
func (*Server) GetTransaction ¶
func (*Server) InternalClient ¶
func (s *Server) InternalClient() InternalClient
func (*Server) StartTransaction ¶
func (*Server) SyncData ¶
SyncData manually invokes the anti-entropy process, which makes sure that this node has the data from all replicas across the cluster.
func (*Server) Transactions ¶
type ServerOption ¶
ServerOption is a functional option type for pilosa.Server
func OptServerAntiEntropyInterval ¶
func OptServerAntiEntropyInterval(interval time.Duration) ServerOption
OptServerAntiEntropyInterval is a functional option on Server used to set the anti-entropy interval.
func OptServerAttrStoreFunc ¶
func OptServerAttrStoreFunc(af func(string) AttrStore) ServerOption
OptServerAttrStoreFunc is a functional option on Server used to provide the function to use to generate a new attribute store.
func OptServerClusterDisabled ¶
func OptServerClusterDisabled(disabled bool, hosts []string) ServerOption
OptServerClusterDisabled tells the server whether to use a static cluster with the defined hosts. Mostly used for testing.
func OptServerClusterHasher ¶
func OptServerClusterHasher(h Hasher) ServerOption
OptServerClusterHasher is a functional option on Server used to specify the consistent hash algorithm for data location within the cluster.
func OptServerClusterName ¶ added in v2.3.0
func OptServerClusterName(name string) ServerOption
OptServerClusterName sets the human-readable cluster name.
func OptServerDataDir ¶
func OptServerDataDir(dir string) ServerOption
OptServerDataDir is a functional option on Server used to set the data directory.
func OptServerDiagnosticsInterval ¶
func OptServerDiagnosticsInterval(dur time.Duration) ServerOption
OptServerDiagnosticsInterval is a functional option on Server used to specify the duration between diagnostic checks.
func OptServerExecutorPoolSize ¶
func OptServerExecutorPoolSize(size int) ServerOption
func OptServerGCNotifier ¶
func OptServerGCNotifier(gcn GCNotifier) ServerOption
OptServerGCNotifier is a functional option on Server used to set the garbage collection notification source.
func OptServerGRPCURI ¶
func OptServerGRPCURI(uri *URI) ServerOption
OptServerGRPCURI is a functional option on Server used to set the server gRPC URI.
func OptServerInternalClient ¶
func OptServerInternalClient(c InternalClient) ServerOption
OptServerInternalClient is a functional option on Server used to set the implementation of InternalClient.
func OptServerIsCoordinator ¶
func OptServerIsCoordinator(is bool) ServerOption
OptServerIsCoordinator is a functional option on Server used to specify whether or not this server is the coordinator.
func OptServerLogger ¶
func OptServerLogger(l logger.Logger) ServerOption
OptServerLogger is a functional option on Server used to set the logger.
func OptServerLongQueryTime ¶
func OptServerLongQueryTime(dur time.Duration) ServerOption
OptServerLongQueryTime is a functional option on Server used to set long query duration.
func OptServerMaxWritesPerRequest ¶
func OptServerMaxWritesPerRequest(n int) ServerOption
OptServerMaxWritesPerRequest is a functional option on Server used to set the maximum number of writes allowed per request.
func OptServerMetricInterval ¶
func OptServerMetricInterval(dur time.Duration) ServerOption
OptServerMetricInterval is a functional option on Server used to set the interval between metric samples.
func OptServerNodeDownRetries ¶
func OptServerNodeDownRetries(retries int, sleep time.Duration) ServerOption
OptServerNodeDownRetries is a functional option on Server used to specify the retries and sleep duration for node down checks.
func OptServerNodeID ¶
func OptServerNodeID(nodeID string) ServerOption
OptServerNodeID is a functional option on Server used to set the server node ID.
func OptServerOpenIDAllocator ¶ added in v2.5.0
func OptServerOpenIDAllocator(fn OpenIDAllocatorFunc) ServerOption
OptServerOpenIDAllocator is a functional option on Server used to specify the ID allocator data store type. Except not really (because there's only one at this time).
func OptServerOpenTranslateReader ¶
func OptServerOpenTranslateReader(fn OpenTranslateReaderFunc) ServerOption
OptServerOpenTranslateReader is a functional option on Server used to specify the remote translation data reader.
func OptServerOpenTranslateStore ¶
func OptServerOpenTranslateStore(fn OpenTranslateStoreFunc) ServerOption
OptServerOpenTranslateStore is a functional option on Server used to specify the translation data store type.
func OptServerPrimaryTranslateStore ¶
func OptServerPrimaryTranslateStore(store TranslateStore) ServerOption
OptServerPrimaryTranslateStore has been deprecated.
func OptServerQueryHistoryLength ¶ added in v2.4.0
func OptServerQueryHistoryLength(length int) ServerOption
OptServerQueryHistoryLength is a functional option on Server used to specify the length of the query history buffer that maintains the information returned at /query-history.
func OptServerRBFConfig ¶ added in v2.3.0
func OptServerRBFConfig(cfg *rbfcfg.Config) ServerOption
OptServerRBFConfig conveys the RBF flags to the Holder.
func OptServerReplicaN ¶
func OptServerReplicaN(n int) ServerOption
OptServerReplicaN is a functional option on Server used to set the number of replicas.
func OptServerRowcacheOn ¶ added in v2.6.0
func OptServerRowcacheOn(rowcacheOn bool) ServerOption
OptServerRowcacheOn is a functional option on Server used to turn on the row cache.
func OptServerSerializer ¶
func OptServerSerializer(ser Serializer) ServerOption
OptServerSerializer is a functional option on Server used to set the serializer.
func OptServerStatsClient ¶
func OptServerStatsClient(sc stats.StatsClient) ServerOption
OptServerStatsClient is a functional option on Server used to specify the stats client.
func OptServerSystemInfo ¶
func OptServerSystemInfo(si SystemInfo) ServerOption
OptServerSystemInfo is a functional option on Server used to set the system information source.
func OptServerTxsrc ¶ added in v2.2.0
func OptServerTxsrc(txsrc string) ServerOption
OptServerTxsrc is a functional option on Server used to specify the transactional-storage to use, resulting in RoaringTx, RbfTx, BadgerTx, or a blueGreen* Tx being used for all Tx interface calls.
func OptServerURI ¶
func OptServerURI(uri *URI) ServerOption
OptServerURI is a functional option on Server used to set the server URI.
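Bringing a few of these options together, a minimal sketch of the functional-option pattern; the data directory, replica count, and interval are arbitrary, and a real deployment needs more configuration than shown:

    import "time"

    func newExampleServer() (*Server, error) {
        return NewServer(
            OptServerDataDir("/tmp/pilosa-example"),      // arbitrary path
            OptServerReplicaN(1),                         // single replica
            OptServerAntiEntropyInterval(10*time.Minute), // illustrative interval
            OptServerOpenTranslateStore(OpenInMemTranslateStore),
        )
    }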
type SetCoordinatorMessage ¶
type SetCoordinatorMessage struct {
New *Node
}
SetCoordinatorMessage is an internal message instructing nodes to honor a new coordinator.
type SignedRow ¶
type SignedRow struct { Neg *Row `json:"neg"` Pos *Row `json:"pos"` // contains filtered or unexported fields }
SignedRow represents a signed *Row with two (neg/pos) *Rows.
type SnapshotQueue ¶
type SnapshotQueue interface { Immediate(*fragment) error Enqueue(*fragment) Await(*fragment) error ScanHolder(*Holder, chan struct{}) Stop() }
SnapshotQueue handles enqueuing snapshots. A snapshot queue distinguishes between high-priority requests, which get satisfied by the next available worker, and regular requests, which get enqueued if there's space in the queue and are otherwise dropped. There's also a separate background task to scan a holder for fragments which may need snapshots, but it is processed only when the queue is empty, and only slowly. "Await" awaits an existing snapshot if one is already enqueued. "Immediate" tries to do one right away. (If one's already enqueued, this can leave it in the queue, which will ignore anything that shows up with the request flag cleared.)
Await, Enqueue, and Immediate should be called only with the fragment lock held.
If you create a queue, it should get stopped at some point. The atomicSnapshotQueue implementation used as defaultSnapshotQueue has a Start function which will tell you whether it actually started a queue. This logic exists because in a normal server case, you probably want the queue to be shut down as part of server shutdown, but if you're running cluster tests, you probably want to start and stop the queue as part of the test, not stop it when any server terminates.
It's less likely to be desirable to start/stop individual queues, because fragments use the defaultSnapshotQueue anyway. This design needs revisiting.
type SortByTot ¶ added in v2.2.0
type SortByTot []*LineSorter
type SystemInfo ¶
type SystemInfo interface { Uptime() (uint64, error) Platform() (string, error) Family() (string, error) OSVersion() (string, error) KernelVersion() (string, error) MemFree() (uint64, error) MemTotal() (uint64, error) MemUsed() (uint64, error) CPUModel() string CPUCores() (physical int, logical int, err error) CPUMHz() (int, error) CPUArch() string DiskCapacity(string) (uint64, error) }
SystemInfo collects information about the host OS.
type TimeQuantum ¶
type TimeQuantum string
TimeQuantum represents a time granularity for time-based bitmaps.
func (TimeQuantum) HasDay ¶
func (q TimeQuantum) HasDay() bool
HasDay returns true if the quantum contains a 'D' unit.
func (TimeQuantum) HasHour ¶
func (q TimeQuantum) HasHour() bool
HasHour returns true if the quantum contains a 'H' unit.
func (TimeQuantum) HasMonth ¶
func (q TimeQuantum) HasMonth() bool
HasMonth returns true if the quantum contains a 'M' unit.
func (TimeQuantum) HasYear ¶
func (q TimeQuantum) HasYear() bool
HasYear returns true if the quantum contains a 'Y' unit.
func (*TimeQuantum) Set ¶
func (q *TimeQuantum) Set(value string) error
Set sets the time quantum value.
func (TimeQuantum) String ¶
func (q TimeQuantum) String() string
func (TimeQuantum) Type ¶
func (q TimeQuantum) Type() string
Type returns the type of a time quantum value.
func (TimeQuantum) Valid ¶
func (q TimeQuantum) Valid() bool
Valid returns true if q is a valid time quantum value.
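A minimal sketch of working with a quantum value; "YMD" is an illustrative year/month/day granularity:

    func quantumExample() {
        var q TimeQuantum
        if err := q.Set("YMD"); err != nil {
            panic(err) // "YMD" should parse as a valid quantum
        }
        _, _, _ = q.HasYear(), q.HasMonth(), q.HasDay() // all true
        _ = q.HasHour()                                 // false
        _ = q.Valid()                                   // true
    }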
type Topology ¶
type Topology struct { // Hashing algorithm used to assign partitions to nodes. Hasher Hasher // The number of partitions in the cluster. PartitionN int // The number of replicas a partition has. ReplicaN int // contains filtered or unexported fields }
Topology represents the list of hosts in the cluster. Topology now encapsulates all knowledge needed to determine the primary node in the replication scheme.
func DecodeTopology ¶ added in v2.3.0
func DecodeTopology(topology *internal.Topology, hasher Hasher, partitionN, replicaN int, c *cluster) (*Topology, error)
The cluster c is optional, but provide it if you have it.
func NewTopology ¶ added in v2.3.0
NewTopology creates a Topology.
The arguments and members hasher, partitionN, and replicaN were refactored out of struct cluster to allow pilosa-fsck to load a Topology from backup and then compute primaries standalone -- without starting a cluster. As pilosa-fsck operates on all backups at once from a single cpu, starting a full cluster isn't possible.
The hasher is the Hashing algorithm used to assign partitions to nodes. The cluster c should be provided if possible by pilosa code; the pilosa-fsck utility won't be able to provide it.
For the cluster size N, the topology gives preference to len(t.nodeIDs) before falling back on len(c.nodes).
func (*Topology) ContainsID ¶
ContainsID returns true if id matches one of the topology's IDs.
func (*Topology) GetNodeIDs ¶ added in v2.3.0
func (*Topology) GetNonPrimaryReplicas ¶ added in v2.3.0
func (*Topology) GetPrimaryForColKeyTranslation ¶ added in v2.3.0
The boltdb key translation stores are partitioned, designated by partitionIDs. These are shared between replicas, and one node is the primary for replication. So with 4 nodes and 3-way replication, each node has 3/4 of the translation stores on it.
func (*Topology) GetPrimaryForShardReplication ¶ added in v2.3.0
GetPrimaryForShardReplication should match cluster.ownsShard(nodeID, index, shard) at cluster.go:1033:

    return Nodes(c.shardNodes(index, shard)).ContainsID(nodeID)
func (*Topology) GetReplicasForPrimary ¶ added in v2.3.0
func (topo *Topology) GetReplicasForPrimary(primary int) (replicaNodeIDs, nonReplicas map[string]bool)
The map replicaNodeIDs[nodeID] will have a true value for the primary nodeID and false for the others.
func (*Topology) KeyPartition ¶ added in v2.3.0
KeyPartition returns the key-partition that a key belongs to. NOTE: the key-partition is DIFFERENT from the shard-partition.
func (*Topology) PrimaryNodeIndex ¶ added in v2.3.0
type Transaction ¶
type Transaction struct { // ID is an arbitrary string identifier. All transactions must have a unique ID. ID string `json:"id"` // Active notes whether an exclusive transaction is active, or // still pending (if other active transactions exist). All // non-exclusive transactions are always active. Active bool `json:"active"` // Exclusive is set to true for transactions which can only become active when no other // transactions exist. Exclusive bool `json:"exclusive"` // Timeout is the minimum idle time for which this transaction should continue to exist. Timeout time.Duration `json:"timeout"` // CreatedAt is the timestamp at which the transaction was created. This supports // the case of listing transactions in a useful order. CreatedAt time.Time `json:"createdAt"` // Deadline is calculated from Timeout. TODO reset deadline each time there is activity // on the transaction. (we can't do this until there is some method of associating a // request/call with a transaction) Deadline time.Time `json:"deadline"` // Stats track statistics for the transaction. Not yet used. Stats TransactionStats `json:"stats"` }
Transaction contains information related to a block of work that needs to be tracked and spans multiple API calls.
func (*Transaction) Copy ¶
func (trns *Transaction) Copy() *Transaction
func (*Transaction) MarshalJSON ¶
func (trns *Transaction) MarshalJSON() ([]byte, error)
func (*Transaction) UnmarshalJSON ¶
func (trns *Transaction) UnmarshalJSON(b []byte) error
type TransactionManager ¶
TransactionManager enforces the rules for transactions on a single node. It is goroutine-safe. It should be created by a call to NewTransactionManager where it takes a TransactionStore. If logging is desired, Log should be set before an instance of TransactionManager is used.
func NewTransactionManager ¶
func NewTransactionManager(store TransactionStore) *TransactionManager
NewTransactionManager creates a new TransactionManager with the given store, and starts a deadline-checker in a goroutine.
func (*TransactionManager) Finish ¶
func (tm *TransactionManager) Finish(ctx context.Context, id string) (*Transaction, error)
Finish completes and removes a transaction, returning the completed transaction (so that the caller can e.g. view the Stats)
func (*TransactionManager) Get ¶
func (tm *TransactionManager) Get(ctx context.Context, id string) (*Transaction, error)
Get retrieves the transaction with the given ID. Returns ErrTransactionNotFound if there isn't one.
func (*TransactionManager) List ¶
func (tm *TransactionManager) List(ctx context.Context) (map[string]*Transaction, error)
List returns map of all transactions by their ID. It is a copy and so may be retained and modified by the caller.
func (*TransactionManager) ResetDeadline ¶
func (tm *TransactionManager) ResetDeadline(ctx context.Context, id string) (*Transaction, error)
ResetDeadline updates the deadline for the transaction with the given ID to be equal to the current time plus the transaction's timeout.
func (*TransactionManager) Start ¶
func (tm *TransactionManager) Start(ctx context.Context, id string, timeout time.Duration, exclusive bool) (*Transaction, error)
Start starts a new transaction with the given parameters. If an exclusive transaction is pending or in progress, ErrTransactionExclusive is returned. If a transaction with the same id already exists, that transaction is returned along with ErrTransactionExists. If there is no error, the created transaction is returned—this is primarily so that the caller can discover if an exclusive transaction has been made immediately active or if they need to poll.
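A minimal sketch wiring the in-memory store to a manager and starting an exclusive transaction; the id and timeout are arbitrary and the polling loop is simplified:

    import (
        "context"
        "time"
    )

    func startExclusive(ctx context.Context) (*Transaction, error) {
        tm := NewTransactionManager(NewInMemTransactionStore())

        trns, err := tm.Start(ctx, "backup-1", time.Minute, true)
        if err != nil {
            return nil, err // e.g. ErrTransactionExists or ErrTransactionExclusive
        }
        // An exclusive transaction may start out pending; poll until active.
        for !trns.Active {
            time.Sleep(100 * time.Millisecond)
            if trns, err = tm.Get(ctx, "backup-1"); err != nil {
                return nil, err
            }
        }
        return trns, nil
    }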
type TransactionMessage ¶
type TransactionMessage struct { Transaction *Transaction Action string }
type TransactionStats ¶
type TransactionStats struct{}
type TransactionStore ¶
type TransactionStore interface { // Put stores a new transaction or replaces an existing transaction with the given one. Put(trns *Transaction) error // Get retrieves the transaction at id or returns ErrTransactionNotFound if there isn't one. Get(id string) (*Transaction, error) // List returns a map of all transactions by ID. The map must be safe to modify by the caller. List() (map[string]*Transaction, error) // Remove deletes the transaction from the store. It must return ErrTransactionNotFound if there isn't one. Remove(id string) (*Transaction, error) }
TransactionStore declares the functionality which a store for Pilosa transactions must implement.
func OpenInMemTransactionStore ¶
func OpenInMemTransactionStore(path string) (TransactionStore, error)
type TranslateEntry ¶
type TranslateEntry struct { Index string `json:"index,omitempty"` Field string `json:"field,omitempty"` ID uint64 `json:"id,omitempty"` Key string `json:"key,omitempty"` }
TranslateEntry represents a key/ID pair from a TranslateStore.
type TranslateEntryReader ¶
type TranslateEntryReader interface { io.Closer ReadEntry(entry *TranslateEntry) error }
TranslateEntryReader represents a stream of translation entries.
type TranslateIDsRequest ¶
TranslateIDsRequest describes the structure of a request for a batch of id translations.
type TranslateIDsResponse ¶
type TranslateIDsResponse struct {
Keys []string
}
TranslateIDsResponse is the structured response of an ID translation request.
type TranslateKeysRequest ¶
type TranslateKeysRequest struct { Index string Field string Keys []string // it's an awkward name, kept for backward compatibility with go-pilosa and idk. NotWritable bool }
TranslateKeysRequest describes the structure of a request for a batch of key translations.
type TranslateKeysResponse ¶
type TranslateKeysResponse struct {
IDs []uint64
}
TranslateKeysResponse is the structured response of a key translation request.
type TranslateOffsetMap ¶
type TranslateOffsetMap map[string]*IndexTranslateOffsetMap
TranslateOffsetMap maintains a set of offsets for both indexes & fields.
func (TranslateOffsetMap) FieldOffset ¶
func (m TranslateOffsetMap) FieldOffset(index, name string) uint64
FieldOffset returns the offset for the given field.
func (TranslateOffsetMap) IndexPartitionOffset ¶
func (m TranslateOffsetMap) IndexPartitionOffset(name string, partitionID int) uint64
IndexPartitionOffset returns the offset for the given index partition.
func (TranslateOffsetMap) SetFieldOffset ¶
func (m TranslateOffsetMap) SetFieldOffset(index, name string, offset uint64)
SetFieldOffset sets the offset for the given field.
func (TranslateOffsetMap) SetIndexPartitionOffset ¶
func (m TranslateOffsetMap) SetIndexPartitionOffset(name string, partitionID int, offset uint64)
SetIndexPartitionOffset sets the offset for the given index partition.
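A minimal sketch of recording offsets and reading them back; the index, field, partition, and offset values are illustrative:

    func offsetExample() {
        m := make(TranslateOffsetMap)
        m["myindex"] = NewIndexTranslateOffsetMap() // ensure the nested maps exist

        m.SetIndexPartitionOffset("myindex", 3, 42)
        m.SetFieldOffset("myindex", "myfield", 7)

        _ = m.IndexPartitionOffset("myindex", 3) // 42
        _ = m.FieldOffset("myindex", "myfield")  // 7
    }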
type TranslateStore ¶
type TranslateStore interface { io.Closer // Returns the maximum ID set on the store. MaxID() (uint64, error) // Retrieves the partition ID associated with the store. // Only applies to index stores. PartitionID() int // Sets & retrieves whether the store is read-only. ReadOnly() bool SetReadOnly(v bool) // Converts a string key to its autoincrementing integer ID value. // // Translated id must be associated with a shard in the store's partition // unless partition is set to -1. TranslateKey(key string, writable bool) (uint64, error) TranslateKeys(key []string, writable bool) ([]uint64, error) // FindKeys looks up the ID for each key. // Keys are not created if they do not exist. // Missing keys are not considered errors, so the length of the result may be less than that of the input. FindKeys(keys ...string) (map[string]uint64, error) // CreateKeys maps all keys to IDs, creating the IDs if they do not exist. // If the translator is read-only, this will return an error. CreateKeys(keys ...string) (map[string]uint64, error) // Converts an integer ID to its associated string key. TranslateID(id uint64) (string, error) TranslateIDs(id []uint64) ([]string, error) // Forces the write of a key/id pair, even if read only. Used by replication. ForceSet(id uint64, key string) error // Returns a reader from the given ID offset. EntryReader(ctx context.Context, offset uint64) (TranslateEntryReader, error) // WriteTo ensures that the TranslateStore implements io.WriterTo. // It should write the contents of the store to the writer. WriteTo(io.Writer) (int64, error) // ReadFrom ensures that the TranslateStore implements io.ReaderFrom. // It should read from the reader and replace the data store with // the read payload. ReadFrom(io.Reader) (int64, error) ComputeTranslatorSummaryRows() (sum *TranslatorSummary, err error) ComputeTranslatorSummaryCols(partitionID int, topo *Topology) (sum *TranslatorSummary, err error) KeyWalker(walk func(key string, col uint64)) error IDWalker(walk func(key string, col uint64)) error RepairKeys(topo *Topology, verbose, applyKeyRepairs bool) (changed bool, err error) GetStorePath() string }
TranslateStore is the storage for translation string-to-uint64 values. For the BoltDB implementation, an empty string will be converted into the sentinel byte slice:
var emptyKey = []byte{
    0x00, 0x00, 0x00,
    0x4d, 0x54, 0x4d, 0x54, // MTMT
    0x00,
    0xc2, 0xa0, // NO-BREAK SPACE
    0x00,
}
func OpenInMemTranslateStore ¶
func OpenInMemTranslateStore(rawurl, index, field string, partitionID, partitionN int) (TranslateStore, error)
OpenInMemTranslateStore returns a new instance of InMemTranslateStore. Implements OpenTranslateStoreFunc.
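As a hedged illustration of the TranslateStore contract, the sketch below uses the in-memory implementation. The import path is assumed, the rawurl argument is assumed to be unused by the in-memory store, and the concrete IDs returned are implementation-defined.

package main

import (
    "fmt"
    "log"

    pilosa "github.com/pilosa/pilosa/v2" // import path assumed
)

func main() {
    // Open an in-memory translate store for a field; partitionID 0 and
    // partitionN 1 keep partitioning out of the picture.
    ts, err := pilosa.OpenInMemTranslateStore("", "myindex", "myfield", 0, 1)
    if err != nil {
        log.Fatal(err)
    }
    defer ts.Close()

    // Create (or look up) IDs for a batch of keys.
    ids, err := ts.CreateKeys("apple", "banana")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(ids) // e.g. map[apple:1 banana:2] -- actual IDs are implementation-defined

    // Translate a single ID back to its key.
    key, err := ts.TranslateID(ids["apple"])
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(key) // "apple"

    // FindKeys does not create missing keys; "cherry" will simply be absent.
    found, err := ts.FindKeys("apple", "cherry")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(found)
}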
type TranslationResizeSource ¶
TranslationResizeSource is the source of translation data for a node acting on a ResizeInstruction.
type TranslationSyncer ¶ added in v2.2.0
type TranslationSyncer interface {
Reset() error
}
translationSyncer provides an interface allowing a function to notify the server that an action has occurred which requires the translation sync process to be reset. In general, this includes anything which modifies the schema (add/remove index, etc.), or anything that changes the cluster topology (add/remove node). I originally considered leveraging the broadcaster since that was already in place and provides similar event messages, but the broadcaster is really meant for notifying other nodes, while this is more akin to an internal message bus. In fact, I think a future iteration on this may be to make it more generic so it can act as an internal message bus where one of the messages being published is "translationSyncReset".
var NopTranslationSyncer TranslationSyncer = &nopTranslationSyncer{}
NopTranslationSyncer represents a translationSyncer that doesn't do anything.
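Since the interface has a single Reset method, a custom implementation is trivial. The sketch below (not from the package docs) shows a hypothetical logging syncer alongside the shipped no-op; the import path is assumed.

package main

import (
    "log"

    pilosa "github.com/pilosa/pilosa/v2" // import path assumed
)

// loggingSyncer is a hypothetical TranslationSyncer that just records that a
// schema or topology change requested a translation-sync reset.
type loggingSyncer struct{}

func (s *loggingSyncer) Reset() error {
    log.Println("translation sync reset requested")
    return nil
}

func main() {
    var syncer pilosa.TranslationSyncer = &loggingSyncer{}
    _ = syncer.Reset()

    // The package also ships a no-op implementation for callers that
    // don't need syncing.
    _ = pilosa.NopTranslationSyncer.Reset()
}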
type TranslatorSummary ¶ added in v2.2.0
type TranslatorSummary struct {
    Index string

    // PartitionID is filled for column keys
    PartitionID int

    NodeID    string
    StorePath string
    IsPrimary bool
    IsReplica bool

    // PrimaryNodeIndex indexes into the cluster []node array to find the primary
    PrimaryNodeIndex int

    // Field is filled for row keys
    Field string

    // Checksum has a blake3 crypto hash of all the key->ID and all the ID->key mappings
    Checksum string

    // KeyCount has the number of Key->ID mappings
    KeyCount int

    // IDCount has the number of ID->Key mappings
    IDCount int

    // IsColKey is false for row IDs, true for string-key column IDs.
    IsColKey bool
}
TranslatorSummary is returned, for example, by the BoltDB string-key translators from ComputeTranslatorSummary(). Non-BoltDB implementations (mocks, etc.) no-op that method.
func (*TranslatorSummary) String ¶ added in v2.3.0
func (s *TranslatorSummary) String() string
type Tx ¶ added in v2.2.0
type Tx interface {
    // Type returns "roaring", "rbf", "bolt", "badger_roaring", or one of the other
    // blue-green Tx types at the top of txfactory.go
    Type() string

    // Rollback must be called at the end of read-only transactions. Either
    // Rollback or Commit must be called at the end of writable transactions.
    // It is safe to call Rollback multiple times, but it must be
    // called at least once to release resources. Any Rollback after
    // a Commit is ignored, so 'defer tx.Rollback()' should be commonly
    // written after starting a new transaction.
    //
    // If there is an error during internal Rollback processing,
    // this would be quite serious, and the underlying storage is
    // expected to panic. Hence there is no explicit error returned
    // from Rollback that needs to be checked.
    Rollback()

    // Commit makes the updates in the Tx visible to subsequent transactions.
    Commit() error

    // IsDone must return true if Rollback() or Commit() has already
    // been called. Otherwise it must return false. This allows
    // DBWrapper.CleanupTx(tx Tx) to be idempotent.
    IsDone() bool

    // Readonly returns the flag this transaction was created with
    // during NewTx. If the transaction is writable, it will return false.
    Readonly() bool

    // UseRowCache is used by fragment.go unprotectedRow() to determine
    // dynamically at runtime whether RoaringTx transactions are in use,
    // which for continuity want to continue to use the rowCache, or whether
    // other storage engines (RBF, Badger) are in use, in which case the
    // bitmap data stored by the rowCache can disappear as it is un-mmap-ed,
    // causing crashes.
    UseRowCache() bool

    // IncrementOpN updates internal statistics with the changedN provided.
    IncrementOpN(index, field, view string, shard uint64, changedN int)

    // Pointer gives us a memory address for the underlying
    // transaction for debugging.
    // It is public because we use it in roaring to report invalid
    // container memory access outside of a transaction.
    Pointer() string

    // NewTxIterator returns it, a *roaring.Iterator whose it.Next() will
    // successively return each uint64 stored in the conceptual roaring.Bitmap
    // for the specified fragment.
    NewTxIterator(index, field, view string, shard uint64) (it *roaring.Iterator)

    // ContainerIterator loops over the containers in the conceptual
    // roaring.Bitmap for the specified fragment.
    // Calling Next() on the returned roaring.ContainerIterator gives
    // you a roaring.Container that is either run, array, or raw bitmap.
    // Return value 'found' is true when the ckey container was present.
    // ckey of 0 gives all containers (in the fragment).
    //
    // ContainerIterator must not have side-effects. blueGreenTx will
    // call it at the very beginning of commit to verify db contents.
    //
    // citer.Close() must be called when the client is done using it.
    ContainerIterator(index, field, view string, shard uint64, ckey uint64) (citer roaring.ContainerIterator, found bool, err error)

    // ApplyFilter applies a roaring.BitmapFilter to a specified shard,
    // starting at the given container key. The filter's ConsiderData
    // method may be called with transient Container objects which *must
    // not* be retained or referenced after that function exits. Similarly,
    // their data must not be retained. If you need the data later, you
    // must copy it into some other memory.
    ApplyFilter(index, field, view string, shard uint64, ckey uint64, filter roaring.BitmapFilter) (err error)

    // RoaringBitmap retrieves the roaring.Bitmap for the entire shard.
    RoaringBitmap(index, field, view string, shard uint64) (*roaring.Bitmap, error)

    // Container returns the roaring.Container for the given ckey
    // (container-key or highbits), in the chosen fragment.
    Container(index, field, view string, shard uint64, ckey uint64) (*roaring.Container, error)

    // PutContainer stores c under the given ckey (container-key), in the specified fragment.
    PutContainer(index, field, view string, shard uint64, ckey uint64, c *roaring.Container) error

    // RemoveContainer deletes the roaring.Container under the given ckey (container-key),
    // in the specified fragment.
    RemoveContainer(index, field, view string, shard uint64, ckey uint64) error

    // Add adds the 'a' bits to the specified fragment.
    //
    // Using batched=true allows efficient bulk-import.
    //
    // Notes on the RoaringTx implementation:
    // If the batched flag is true, then roaring.Bitmap.AddN() is used, which does oplog batches.
    // If the batched flag is false, then roaring.Bitmap.Add() is used, which does simple opTypeAdd single adds.
    //
    // Beware: if batched is false, then changeCount will only ever be 0 or 1,
    // because it calls roaring.Add().
    // If batched is true, we call roaring.DirectAddN() and then changeCount
    // will be accurate if the changeCount is greater than 0.
    //
    // Hence: only ever call Add(batched=false) if changeCount is expected to be 0 or 1.
    // Otherwise, use Add(batched=true) if changeCount can be > 1.
    Add(index, field, view string, shard uint64, batched bool, a ...uint64) (changeCount int, err error)

    // Remove removes the 'a' values from the Bitmap for the fragment.
    Remove(index, field, view string, shard uint64, a ...uint64) (changeCount int, err error)

    // Contains tests if the uint64 v is stored in the fragment's Bitmap.
    Contains(index, field, view string, shard uint64, v uint64) (exists bool, err error)

    ForEach(index, field, view string, shard uint64, fn func(i uint64) error) error
    ForEachRange(index, field, view string, shard uint64, start, end uint64, fn func(uint64) error) error
    Count(index, field, view string, shard uint64) (uint64, error)
    Max(index, field, view string, shard uint64) (uint64, error)
    Min(index, field, view string, shard uint64) (uint64, bool, error)
    UnionInPlace(index, field, view string, shard uint64, others ...*roaring.Bitmap) error
    CountRange(index, field, view string, shard uint64, start, end uint64) (uint64, error)
    OffsetRange(index, field, view string, shard uint64, offset, start, end uint64) (*roaring.Bitmap, error)

    // ImportRoaringBits does efficient bulk import using rit, a roaring.RoaringIterator.
    //
    // See the roaring package for details of the RoaringIterator.
    //
    // If clear is true, the bits from rit are cleared, otherwise they are set in the
    // specified fragment.
    //
    // The data argument can be nil; it is ignored for RBF/BadgerTx. It is supplied to
    // RoaringTx.ImportRoaringBits() in fragment.go fragment.fillFragmentFromArchive()
    // to do the traditional fragment.readStorageFromArchive(), which
    // does some in-memory field/view/fragment metadata updates.
    // It makes blueGreenTx testing viable too.
    //
    // The ImportRoaringBits return values changed and rowSet may be inaccurate if
    // the data []byte is supplied (the RoaringTx implementation neglects this for speed).
    ImportRoaringBits(index, field, view string, shard uint64, rit roaring.RoaringIterator, clear bool, log bool, rowSize uint64, data []byte) (changed int, rowSet map[uint64]int, err error)

    RoaringBitmapReader(index, field, view string, shard uint64, fragmentPathForRoaring string) (r io.ReadCloser, sz int64, err error)

    // Group returns nil or the TxGroup that this Tx is a part of.
    Group() *TxGroup

    // Dump is for debugging: what does this Tx see as its database?
    Dump(short bool, shard uint64)

    // Options returns the options used to create this Tx. This
    // can be implemented by embedding Txo, which provides the
    // Options() method.
    Options() Txo

    // Sn retrieves the serial number of the Tx.
    Sn() int64

    // GetSortedFieldViewList gets the set of FieldView(s).
    GetSortedFieldViewList(idx *Index, shard uint64) (fvs []txkey.FieldView, err error)

    GetFieldSizeBytes(index, field string) (uint64, error)
}
Tx providers offer transactional storage for high-level roaring.Bitmaps and low-level roaring.Containers.
The common 4-tuple of (index, field, view, shard) jointly specify a fragment. A fragment conceptually holds one roaring.Bitmap.
Within the fragment, the ckey or container-key is the uint64 that specifies the high 48 bits of the roaring.Bitmap 64-bit space. The ckey is used to retrieve a specific roaring.Container that is either a run, array, or raw bitmap. The roaring.Container covers the low 16 bits of the roaring.Bitmap space. Its size is at most 8KB (2^16 bits / (8 bits / byte) == 8192 bytes).
The grain of the transaction is guaranteed to be at least at the shard within one index. Therefore updates to any of the fields within the same shard will be atomically visible only once the transaction commits. Reads from another, concurrently open, transaction will not see updates that have not been committed.
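To make the lifecycle and the container-key arithmetic concrete, here is a minimal sketch (not from the package docs). It assumes a writable Tx has already been obtained from the storage layer; the helper names and the import path are hypothetical.

package main

import (
    "fmt"

    pilosa "github.com/pilosa/pilosa/v2" // import path assumed
)

// setBit shows the canonical lifecycle: defer Rollback, then Commit.
func setBit(tx pilosa.Tx, index, field, view string, shard, bit uint64) error {
    defer tx.Rollback() // ignored if Commit succeeds

    // batched=false: we expect a changeCount of only 0 or 1 for a single bit.
    if _, err := tx.Add(index, field, view, shard, false, bit); err != nil {
        return err
    }
    return tx.Commit()
}

func main() {
    // The container key is the high 48 bits of a bit position; the low 16 bits
    // select the bit within the (at most 8KB) container.
    const bit = uint64(1<<20 + 7)
    fmt.Println(bit>>16, bit&0xFFFF) // 16 7
}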
type TxBitmap ¶ added in v2.2.0
type TxBitmap struct {
// contains filtered or unexported fields
}
TxBitmap represents a bitmap that acts as a cache in front of a transaction. Updates to the bitmap first pull in containers as needed and update them in memory. The changes can be flushed in bulk using Flush().
func NewTxBitmap ¶ added in v2.2.0
type TxFactory ¶ added in v2.2.0
type TxFactory struct {
// contains filtered or unexported fields
}
TxFactory abstracts the creation of Tx interface-level transactions so that RBF, BoltDB, Roaring fragment files, or several of these at once in parallel, can be used as the storage and transaction layer.
func NewTxFactory ¶ added in v2.2.0
NewTxFactory always opens an existing database. If you want a fresh database, call os.RemoveAll on dir/name ahead of time. We always store files in a subdir of holderDir.
func (*TxFactory) CloseIndex ¶ added in v2.2.0
func (*TxFactory) DeleteFieldFromStore ¶ added in v2.2.0
func (*TxFactory) DeleteFragmentFromStore ¶ added in v2.2.0
func (*TxFactory) DeleteIndex ¶ added in v2.2.0
func (*TxFactory) GetDBShardPath ¶ added in v2.2.0
func (*TxFactory) GetFieldView2ShardsMapForIndex ¶ added in v2.7.0
func (txf *TxFactory) GetFieldView2ShardsMapForIndex(idx *Index) (vs *FieldView2Shards, err error)
func (*TxFactory) GetShardsForIndex ¶ added in v2.2.0
func (f *TxFactory) GetShardsForIndex(idx *Index, roaringViewPath string, requireData bool) (map[uint64]bool, error)
GetShardsForIndex returns the shards for idx. If requireData is true, we open the database and verify that it contains at least one key, rather than assuming the presence of the database file is enough.
func (*TxFactory) IndexUsageDetails ¶ added in v2.8.0
func (f *TxFactory) IndexUsageDetails() (map[string]IndexUsage, uint64, error)
IndexUsageDetails computes the sum of filesizes used by the node, broken down by index, field, fragments and keys.
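A minimal sketch of consuming this breakdown, assuming f is an already-open *TxFactory on a running node and that the second return value is the node-wide total in bytes (the import path is also assumed). IndexUsage's fields are not documented here, so the sketch prints each entry generically.

package example

import (
    "fmt"

    pilosa "github.com/pilosa/pilosa/v2" // import path assumed
)

func reportUsage(f *pilosa.TxFactory) error {
    perIndex, total, err := f.IndexUsageDetails()
    if err != nil {
        return err
    }
    for name, usage := range perIndex {
        // IndexUsage's fields are not documented here; print generically.
        fmt.Printf("index %q: %+v\n", name, usage)
    }
    fmt.Printf("assumed node-wide total bytes: %d\n", total)
    return nil
}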
func (*TxFactory) IsTxDatabasePath ¶ added in v2.2.0
func (*TxFactory) NeedsSnapshot ¶ added in v2.2.0
func (*TxFactory) NewDBPerShard ¶ added in v2.2.0
func (txf *TxFactory) NewDBPerShard(types []txtype, holderDir string, holder *Holder) (d *DBPerShard)
func (*TxFactory) NewQcx ¶ added in v2.2.0
NewQcx allocates a new Qcx with a fresh, empty Grp. The top-level executor will set qcx.write = true manually if the overall query is a write.
func (*TxFactory) NewTxGroup ¶ added in v2.2.0
NewTxGroup
func (*TxFactory) Open ¶ added in v2.2.0
Open should be called only after the index metadata has been loaded by Holder.Open(), so that we find all of our indexes.
func (*TxFactory) UseRowCache ¶ added in v2.3.0
UseRowCache can be more "global" than Tx, because at the moment we are sharing the same bool flag in rbf. If this changes, then fragment.openStorage() will need a new way to determine whether it should use the rowCache. Currently it doesn't have a Tx parameter, so we use the TxFactory instead.
type TxGroup ¶ added in v2.2.0
type TxGroup struct {
// contains filtered or unexported fields
}
TxGroup holds a set of read and a set of write transactions that will, en masse, have Rollback() (for the read set) and Commit() (for the write set) called on them when TxGroup.FinishGroup() is invoked. Alternatively, TxGroup.AbortGroup() will call Rollback() on all Tx group members.
func (*TxGroup) AbortGroup ¶ added in v2.2.0
func (g *TxGroup) AbortGroup()
AbortGroup calls Rollback() on every Tx in the group and marks the group as finished. Either AbortGroup() or FinishGroup() must be called on the TxGroup.
func (*TxGroup) AlreadyHaveTx ¶ added in v2.2.0
func (*TxGroup) FinishGroup ¶ added in v2.2.0
FinishGroup commits the write Txs and calls Rollback() on the read Txs contained in the group. Either AbortGroup() or FinishGroup() must be called on the TxGroup, exactly once.
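A minimal sketch of that contract, assuming g came from TxFactory.NewTxGroup (whose signature is not shown in these docs) and that the import path is as below.

package example

import (
    pilosa "github.com/pilosa/pilosa/v2" // import path assumed
)

// runGroup calls exactly one of AbortGroup or FinishGroup, depending on
// whether the supplied work succeeded.
func runGroup(g *pilosa.TxGroup, work func() error) {
    if err := work(); err != nil {
        g.AbortGroup() // rolls back every Tx in the group
        return
    }
    g.FinishGroup() // commits the writes, rolls back the reads
}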
type TxStore ¶ added in v2.2.0
type TxStore interface {
    // DeleteFragment deletes all the containers in a fragment.
    //
    // This is not in a Tx because it will often do too many deletes for a single
    // transaction, and clients would be surprised to find their Tx had already
    // been committed and they are getting an error on double-Commit.
    // Instead each TxStore implementation creates and commits as many
    // transactions as needed.
    //
    // Argument frag should be passed by any RoaringTx user, but for RBF/Badger it can be nil.
    // If not nil, it must be of type *fragment. If frag is supplied, then
    // index must be equal to frag.index, field equal to frag.field, view equal
    // to frag.view, and shard equal to frag.shard.
    DeleteFragment(index, field, view string, shard uint64, frag interface{}) error

    DeleteField(index, field string) error

    // Close shuts down the database.
    Close() error
}
TxStore has operations that will create and commit multiple Tx on a backing store.
type Txo ¶ added in v2.2.0
type Txo struct {
    Write    bool
    Field    *Field
    Index    *Index
    Fragment *fragment
    Shard    uint64
    Group    *TxGroup
    // contains filtered or unexported fields
}
Txo holds the transaction options.
type URI ¶
type URI struct {
    Scheme string `json:"scheme"`
    Host   string `json:"host"`
    Port   uint16 `json:"port"`
}
URI represents a Pilosa URI. A Pilosa URI consists of three parts:
1) Scheme: Protocol of the URI. Default: http.
2) Host: Hostname or IP address of the URI. Default: localhost. IPv6 addresses should be written in brackets, e.g., `[fd42:4201:f86b:7e09:216:3eff:fefa:ed80]`.
3) Port: Port of the URI. Default: 10101.
All parts of the URI are optional. The following are equivalent:
http://localhost:10101
http://localhost
http://:10101
localhost:10101
localhost
:10101
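For illustration, here is a minimal sketch (not from the package docs) that constructs a URI with the documented defaults spelled out and marshals it to JSON; the import path is assumed, and the exact output depends on the type's MarshalJSON method.

package main

import (
    "encoding/json"
    "fmt"
    "log"

    pilosa "github.com/pilosa/pilosa/v2" // import path assumed
)

func main() {
    // Build a URI explicitly rather than parsing an address string.
    u := pilosa.URI{Scheme: "http", Host: "localhost", Port: 10101}

    b, err := json.Marshal(&u)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(string(b)) // e.g. {"scheme":"http","host":"localhost","port":10101}
}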
func AddressWithDefaults ¶
AddressWithDefaults converts addr into a valid address, using defaults when necessary.
func NewURIFromAddress ¶
NewURIFromAddress parses the passed address and returns a URI.
func NewURIFromHostPort ¶
NewURIFromHostPort returns a URI with specified host and port.
func (*URI) MarshalJSON ¶
MarshalJSON marshals URI into a JSON-encoded byte slice.
func (*URI) UnmarshalJSON ¶
UnmarshalJSON unmarshals a byte slice to a URI.
type URIs ¶
type URIs []URI
URIs is a convenience type representing a slice of URI.
func (URIs) HostPortStrings ¶
HostPortStrings returns a slice of host:port strings based on the slice of URI.
type UpdateCoordinatorMessage ¶
type UpdateCoordinatorMessage struct {
New *Node
}
UpdateCoordinatorMessage is an internal message for reassigning the coordinator.
type ValCount ¶
type ValCount struct {
    Val        int64        `json:"value"`
    FloatVal   float64      `json:"floatValue"`
    DecimalVal *pql.Decimal `json:"decimalValue"`
    Count      int64        `json:"count"`
}
ValCount represents a grouping of sum & count for Sum() and Average() calls. It is also used for Min() and Max() results.
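A quick sketch of how such a result marshals, leaving the decimal pointer nil; the import path is assumed, and the output shown assumes plain struct marshaling with the tags above.

package main

import (
    "encoding/json"
    "fmt"

    pilosa "github.com/pilosa/pilosa/v2" // import path assumed
)

func main() {
    // A Sum()-style result: the integer value plus the number of contributing records.
    vc := pilosa.ValCount{Val: 1234, Count: 10}

    b, _ := json.Marshal(vc)
    fmt.Println(string(b))
    // e.g. {"value":1234,"floatValue":0,"decimalValue":null,"count":10}
}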
Source Files
¶
- api.go
- apimethod_string.go
- attr.go
- audit.go
- barrier.go
- bluegreentx.go
- bolt.go
- broadcast.go
- bsi.go
- cache.go
- catcher.go
- client.go
- cluster.go
- cmd.go
- const_amd64.go
- dbshard.go
- diagnostics.go
- doc.go
- event.go
- executor.go
- field.go
- filesystem.go
- fragment.go
- gc.go
- generation.go
- generation_nodebug.go
- gid.go
- handler.go
- holder.go
- idalloc.go
- index.go
- iterator.go
- like.go
- metrics.go
- pilosa.go
- pjobs.go
- pprof.go
- rbf.go
- row.go
- rrtx.go
- server.go
- snapshotqueue.go
- stattx.go
- time.go
- tracker.go
- transaction.go
- translate.go
- tx.go
- txfactory.go
- uri.go
- util.go
- version.go
- view.go
- vprint.go
Directories
¶
Path | Synopsis
---|---
api |
cmd | Package cmd contains all the pilosa subcommand definitions (1 per file).
pilosa | This is the entrypoint for the Pilosa binary.
ctl | package ctl contains all pilosa subcommands other than 'server'.
encoding |
lru | Package lru implements an LRU cache.
pql | Package pql defines the Pilosa Query Language.
roaring | Package roaring implements roaring bitmaps with support for incremental changes.
 | Package txkey consolidates in one place the use of keys to index into our various storage/txn back-ends.
syswrap | Package syswrap wraps syscalls (just mmap right now) in order to impose a global in-process limit on the maximum number of active mmaps.
txkey | Package txkey consolidates in one place the use of keys to index into our various storage/txn back-ends.