v2

package
v1.5.0 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Aug 17, 2022 License: AGPL-3.0 Imports: 34 Imported by: 0

Documentation

Index

Constants

View Source
const DataHeaderLength = 0

DataHeaderLength is the length in bytes for the data header

View Source
const DefaultFlushSizeBytes int = 30 * 1024 * 1024 // 30 MiB
View Source
const IndexHeaderLength = int(uint64Size) // 64bit checksum (xxhash)

IndexHeaderLength is the length in bytes for the index header

View Source
const VersionString = "v2"

Variables

View Source
var (
	// Gzip is the gnu zip compression pool
	Gzip = GzipPool{/* contains filtered or unexported fields */}
	// Lz4_64k is the lz4 compression pool, with 64k buffer size
	Lz4_64k = LZ4Pool{/* contains filtered or unexported fields */}
	// Lz4_256k uses 256k buffer
	Lz4_256k = LZ4Pool{/* contains filtered or unexported fields */}
	// Lz4_1M uses 1M buffer
	Lz4_1M = LZ4Pool{/* contains filtered or unexported fields */}
	// Lz4_4M uses 4M buffer
	Lz4_4M = LZ4Pool{/* contains filtered or unexported fields */}
	// Snappy is the snappy compression pool
	Snappy SnappyPool
	// Noop is the no compression pool
	Noop NoopPool
	// Zstd Pool
	Zstd = ZstdPool{}
	// S2 Pool
	S2 = S2Pool{}

	// BytesBufferPool is a bytes buffer used for lines decompressed.
	// Buckets [0.5KB,1KB,2KB,4KB,8KB]
	BytesBufferPool = pool.New(1<<9, 1<<13, 2, func(size int) interface{} { return make([]byte, 0, size) })
)

Functions

func CopyBlock added in v1.4.0

func CopyBlock(ctx context.Context, meta *backend.BlockMeta, src backend.Reader, dest backend.Writer) error

CopyBlock copies a block from one backend to another. It is done at a low level, all encoding/formatting is preserved.

func CreateBlock added in v1.5.0

func NewDataReader

func NewDataReader(r backend.ContextReader, encoding backend.Encoding) (common.DataReader, error)

NewDataReader constructs a v2 DataReader that handles paged...reading

func NewDataWriter

func NewDataWriter(writer io.Writer, encoding backend.Encoding) (common.DataWriter, error)

NewDataWriter creates a paged data writer

func NewDedupingIterator added in v1.4.0

func NewDedupingIterator(iter common.Iterator, combiner model.ObjectCombiner, dataEncoding string) (common.Iterator, error)

NewDedupingIterator returns a dedupingIterator. This iterator is used to wrap another

iterator.  It will dedupe consecutive objects with the same id using the ObjectCombiner.

func NewIndexReader

func NewIndexReader(r backend.ContextReader, pageSizeBytes int, totalRecords int) (common.IndexReader, error)

NewIndexReader returns an index reader for a byte slice of marshalled ordered records. The index has not changed between v0 and v1.

func NewIndexWriter

func NewIndexWriter(pageSizeBytes int) common.IndexWriter

NewIndexWriter returns an index writer that writes to the provided io.Writer. The index has not changed between v0 and v1.

func NewIterator added in v1.4.0

func NewIterator(reader io.Reader, o common.ObjectReaderWriter) common.Iterator

NewIterator returns the most basic iterator. It iterates over raw objects.

func NewMultiblockIterator added in v1.4.0

func NewMultiblockIterator(ctx context.Context, inputs []common.Iterator, bufferSize int, combiner model.ObjectCombiner, dataEncoding string, logger log.Logger) common.Iterator

NewMultiblockIterator creates a new multiblock iterator. It iterates concurrently in a separate goroutine and results are buffered. Traces are deduped and combined using the object combiner.

func NewObjectReaderWriter

func NewObjectReaderWriter() common.ObjectReaderWriter

func NewPrefetchIterator added in v1.4.0

func NewPrefetchIterator(ctx context.Context, iter common.Iterator, bufferSize int) common.Iterator

NewPrefetchIterator creates a new prefetch iterator. It iterates concurrently in a separate goroutine and results are buffered.

func NewRecordIterator added in v1.4.0

func NewRecordIterator(r []common.Record, dataR common.DataReader, objectRW common.ObjectReaderWriter) common.Iterator

NewRecordIterator returns a recordIterator. This iterator is used for iterating through

a series of objects by reading them one at a time from Records.

func NewRecordReaderWriter

func NewRecordReaderWriter() common.RecordReaderWriter

Types

type Appender added in v1.4.0

type Appender interface {
	Append(common.ID, []byte) error
	Complete() error
	Records() []common.Record
	RecordsForID(common.ID) []common.Record
	Length() int
	DataLength() uint64
}

Appender is capable of tracking objects and ids that are added to it

func NewAppender added in v1.4.0

func NewAppender(dataWriter common.DataWriter) Appender

NewAppender returns an appender. This appender simply appends new objects

to the provided dataWriter.

func NewBufferedAppender added in v1.4.0

func NewBufferedAppender(writer common.DataWriter, indexDownsample int, totalObjectsEstimate int) (Appender, error)

NewBufferedAppender returns a bufferedAppender. This appender buffers writes to

the provided writer and also builds a downsampled records slice.

func NewRecordAppender added in v1.4.0

func NewRecordAppender(records []common.Record) Appender

NewRecordAppender returns an appender that stores records only.

type BackendBlock added in v1.4.0

type BackendBlock struct {
	// contains filtered or unexported fields
}

BackendBlock represents a block already in the backend.

func NewBackendBlock added in v1.4.0

func NewBackendBlock(meta *backend.BlockMeta, r backend.Reader) (*BackendBlock, error)

NewBackendBlock returns a BackendBlock for the given backend.BlockMeta

func (*BackendBlock) BlockMeta added in v1.4.0

func (b *BackendBlock) BlockMeta() *backend.BlockMeta

func (*BackendBlock) FindTraceByID added in v1.4.0

func (b *BackendBlock) FindTraceByID(ctx context.Context, id common.ID) (*tempopb.Trace, error)

func (*BackendBlock) Iterator added in v1.4.0

func (b *BackendBlock) Iterator(chunkSizeBytes uint32) (common.Iterator, error)

Iterator returns an Iterator that iterates over the objects in the block from the backend

func (*BackendBlock) NewIndexReader added in v1.4.0

func (b *BackendBlock) NewIndexReader() (common.IndexReader, error)

func (*BackendBlock) Search added in v1.4.0

type BufferedAppenderGeneric added in v1.4.0

type BufferedAppenderGeneric struct {
	// contains filtered or unexported fields
}

BufferedAppenderGeneric buffers objects into pages and builds a downsampled index

func NewBufferedAppenderGeneric added in v1.4.0

func NewBufferedAppenderGeneric(writer common.DataWriterGeneric, maxPageSize int) *BufferedAppenderGeneric

NewBufferedAppenderGeneric returns a BufferedAppenderGeneric. This appender buffers writes to

the provided writer and also builds a downsampled records slice.

func (*BufferedAppenderGeneric) Append added in v1.4.0

func (a *BufferedAppenderGeneric) Append(ctx context.Context, id common.ID, i interface{}) error

Append appends the id/object to the writer. Note that the caller is giving up ownership of the two byte arrays backing the slices.

Copies should be made and passed in if this is a problem

func (*BufferedAppenderGeneric) Complete added in v1.4.0

func (a *BufferedAppenderGeneric) Complete(ctx context.Context) error

Complete flushes all buffers and releases resources

func (*BufferedAppenderGeneric) Records added in v1.4.0

func (a *BufferedAppenderGeneric) Records() []common.Record

Records returns a slice of the current records

type Compactor added in v1.4.0

type Compactor struct {
	// contains filtered or unexported fields
}

func NewCompactor added in v1.4.0

func NewCompactor(opts common.CompactionOptions) *Compactor

func (*Compactor) Compact added in v1.4.0

func (c *Compactor) Compact(ctx context.Context, l log.Logger, r backend.Reader, writerCallback func(*backend.BlockMeta, time.Time) backend.Writer, inputs []*backend.BlockMeta) (newCompactedBlocks []*backend.BlockMeta, err error)

type Encoding added in v1.4.0

type Encoding struct{}

v2Encoding

func (Encoding) CopyBlock added in v1.5.0

func (v Encoding) CopyBlock(ctx context.Context, meta *backend.BlockMeta, from backend.Reader, to backend.Writer) error

func (Encoding) CreateBlock added in v1.5.0

func (Encoding) NewCompactor added in v1.4.0

func (v Encoding) NewCompactor(opts common.CompactionOptions) common.Compactor

func (Encoding) OpenBlock added in v1.5.0

func (v Encoding) OpenBlock(meta *backend.BlockMeta, r backend.Reader) (common.BackendBlock, error)

func (Encoding) Version added in v1.4.0

func (v Encoding) Version() string

type GzipPool added in v1.2.0

type GzipPool struct {
	// contains filtered or unexported fields
}

GzipPool is a gnu zip (gzip) compression pool

func (*GzipPool) Encoding added in v1.2.0

func (pool *GzipPool) Encoding() backend.Encoding

Encoding implements WriterPool and ReaderPool

func (*GzipPool) GetReader added in v1.2.0

func (pool *GzipPool) GetReader(src io.Reader) (io.Reader, error)

GetReader gets or creates a new CompressionReader and reset it to read from src

func (*GzipPool) GetWriter added in v1.2.0

func (pool *GzipPool) GetWriter(dst io.Writer) (io.WriteCloser, error)

GetWriter gets or creates a new CompressionWriter and reset it to write to dst

func (*GzipPool) PutReader added in v1.2.0

func (pool *GzipPool) PutReader(reader io.Reader)

PutReader places back in the pool a CompressionReader

func (*GzipPool) PutWriter added in v1.2.0

func (pool *GzipPool) PutWriter(writer io.WriteCloser)

PutWriter places back in the pool a CompressionWriter

func (*GzipPool) ResetReader added in v1.2.0

func (pool *GzipPool) ResetReader(src io.Reader, resetReader io.Reader) (io.Reader, error)

ResetReader implements ReaderPool

func (*GzipPool) ResetWriter added in v1.2.0

func (pool *GzipPool) ResetWriter(dst io.Writer, resetWriter io.WriteCloser) (io.WriteCloser, error)

ResetWriter implements WriterPool

type LZ4Pool added in v1.2.0

type LZ4Pool struct {
	// contains filtered or unexported fields
}

LZ4Pool is a pool...of lz4s...

func (*LZ4Pool) Encoding added in v1.2.0

func (pool *LZ4Pool) Encoding() backend.Encoding

Encoding implements WriterPool and ReaderPool

func (*LZ4Pool) GetReader added in v1.2.0

func (pool *LZ4Pool) GetReader(src io.Reader) (io.Reader, error)

GetReader gets or creates a new CompressionReader and reset it to read from src

func (*LZ4Pool) GetWriter added in v1.2.0

func (pool *LZ4Pool) GetWriter(dst io.Writer) (io.WriteCloser, error)

GetWriter gets or creates a new CompressionWriter and reset it to write to dst

func (*LZ4Pool) PutReader added in v1.2.0

func (pool *LZ4Pool) PutReader(reader io.Reader)

PutReader places back in the pool a CompressionReader

func (*LZ4Pool) PutWriter added in v1.2.0

func (pool *LZ4Pool) PutWriter(writer io.WriteCloser)

PutWriter places back in the pool a CompressionWriter

func (*LZ4Pool) ResetReader added in v1.2.0

func (pool *LZ4Pool) ResetReader(src io.Reader, resetReader io.Reader) (io.Reader, error)

ResetReader implements ReaderPool

func (*LZ4Pool) ResetWriter added in v1.2.0

func (pool *LZ4Pool) ResetWriter(dst io.Writer, resetWriter io.WriteCloser) (io.WriteCloser, error)

ResetWriter implements WriterPool

type NoopPool added in v1.2.0

type NoopPool struct{}

NoopPool is for people who think compression is for the weak

func (*NoopPool) Encoding added in v1.2.0

func (pool *NoopPool) Encoding() backend.Encoding

Encoding implements WriterPool and ReaderPool

func (*NoopPool) GetReader added in v1.2.0

func (pool *NoopPool) GetReader(src io.Reader) (io.Reader, error)

GetReader gets or creates a new CompressionReader and reset it to read from src

func (*NoopPool) GetWriter added in v1.2.0

func (pool *NoopPool) GetWriter(dst io.Writer) (io.WriteCloser, error)

GetWriter gets or creates a new CompressionWriter and reset it to write to dst

func (*NoopPool) PutReader added in v1.2.0

func (pool *NoopPool) PutReader(reader io.Reader)

PutReader places back in the pool a CompressionReader

func (*NoopPool) PutWriter added in v1.2.0

func (pool *NoopPool) PutWriter(writer io.WriteCloser)

PutWriter places back in the pool a CompressionWriter

func (*NoopPool) ResetReader added in v1.2.0

func (pool *NoopPool) ResetReader(src io.Reader, resetReader io.Reader) (io.Reader, error)

ResetReader implements ReaderPool

func (*NoopPool) ResetWriter added in v1.2.0

func (pool *NoopPool) ResetWriter(dst io.Writer, resetWriter io.WriteCloser) (io.WriteCloser, error)

ResetWriter implements WriterPool

type PagedFinder added in v1.4.0

type PagedFinder struct {
	// contains filtered or unexported fields
}

func NewPagedFinder added in v1.4.0

func NewPagedFinder(index common.IndexReader, r common.DataReader, combiner model.ObjectCombiner, objectRW common.ObjectReaderWriter, dataEncoding string) *PagedFinder

NewPagedFinder returns a PagedFinder. This finder is used for searching

a set of records and returning an object. If a set of consecutive records has
matching ids they will be combined using the ObjectCombiner.

func (*PagedFinder) Find added in v1.4.0

func (f *PagedFinder) Find(ctx context.Context, id common.ID) ([]byte, error)

type ReaderPool added in v1.2.0

type ReaderPool interface {
	GetReader(io.Reader) (io.Reader, error)
	PutReader(io.Reader)
	ResetReader(src io.Reader, resetReader io.Reader) (io.Reader, error)
	Encoding() backend.Encoding
}

ReaderPool similar to WriterPool but for reading chunks.

type S2Pool added in v1.2.0

type S2Pool struct {
	// contains filtered or unexported fields
}

S2Pool is one s short of s3

func (*S2Pool) Encoding added in v1.2.0

func (pool *S2Pool) Encoding() backend.Encoding

Encoding implements WriterPool and ReaderPool

func (*S2Pool) GetReader added in v1.2.0

func (pool *S2Pool) GetReader(src io.Reader) (io.Reader, error)

GetReader gets or creates a new CompressionReader and reset it to read from src

func (*S2Pool) GetWriter added in v1.2.0

func (pool *S2Pool) GetWriter(dst io.Writer) (io.WriteCloser, error)

GetWriter gets or creates a new CompressionWriter and reset it to write to dst

func (*S2Pool) PutReader added in v1.2.0

func (pool *S2Pool) PutReader(reader io.Reader)

PutReader places back in the pool a CompressionReader

func (*S2Pool) PutWriter added in v1.2.0

func (pool *S2Pool) PutWriter(writer io.WriteCloser)

PutWriter places back in the pool a CompressionWriter

func (*S2Pool) ResetReader added in v1.2.0

func (pool *S2Pool) ResetReader(src io.Reader, resetReader io.Reader) (io.Reader, error)

ResetReader implements ReaderPool

func (*S2Pool) ResetWriter added in v1.2.0

func (pool *S2Pool) ResetWriter(dst io.Writer, resetWriter io.WriteCloser) (io.WriteCloser, error)

ResetWriter implements WriterPool

type SnappyPool added in v1.2.0

type SnappyPool struct {
	// contains filtered or unexported fields
}

SnappyPool is a really cool looking pool. Dang that pool is _snappy_.

func (*SnappyPool) Encoding added in v1.2.0

func (pool *SnappyPool) Encoding() backend.Encoding

Encoding implements WriterPool and ReaderPool

func (*SnappyPool) GetReader added in v1.2.0

func (pool *SnappyPool) GetReader(src io.Reader) (io.Reader, error)

GetReader gets or creates a new CompressionReader and reset it to read from src

func (*SnappyPool) GetWriter added in v1.2.0

func (pool *SnappyPool) GetWriter(dst io.Writer) (io.WriteCloser, error)

GetWriter gets or creates a new CompressionWriter and reset it to write to dst

func (*SnappyPool) PutReader added in v1.2.0

func (pool *SnappyPool) PutReader(reader io.Reader)

PutReader places back in the pool a CompressionReader

func (*SnappyPool) PutWriter added in v1.2.0

func (pool *SnappyPool) PutWriter(writer io.WriteCloser)

PutWriter places back in the pool a CompressionWriter

func (*SnappyPool) ResetReader added in v1.2.0

func (pool *SnappyPool) ResetReader(src io.Reader, resetReader io.Reader) (io.Reader, error)

ResetReader implements ReaderPool

func (*SnappyPool) ResetWriter added in v1.2.0

func (pool *SnappyPool) ResetWriter(dst io.Writer, resetWriter io.WriteCloser) (io.WriteCloser, error)

ResetWriter implements WriterPool

type StreamingBlock added in v1.4.0

type StreamingBlock struct {
	// contains filtered or unexported fields
}

func NewStreamingBlock added in v1.4.0

func NewStreamingBlock(cfg *common.BlockConfig, id uuid.UUID, tenantID string, metas []*backend.BlockMeta, estimatedObjects int) (*StreamingBlock, error)

NewStreamingBlock creates a ... new streaming block. Objects are appended one at a time to the backend.

func (*StreamingBlock) AddObject added in v1.4.0

func (c *StreamingBlock) AddObject(id common.ID, object []byte) error

func (*StreamingBlock) BlockMeta added in v1.4.0

func (c *StreamingBlock) BlockMeta() *backend.BlockMeta

func (*StreamingBlock) Complete added in v1.4.0

func (c *StreamingBlock) Complete(ctx context.Context, tracker backend.AppendTracker, w backend.Writer) (int, error)

Complete finishes the block: it writes the compactor metadata and closes all buffers and appenders

func (*StreamingBlock) CurrentBufferLength added in v1.4.0

func (c *StreamingBlock) CurrentBufferLength() int

func (*StreamingBlock) CurrentBufferedObjects added in v1.4.0

func (c *StreamingBlock) CurrentBufferedObjects() int

func (*StreamingBlock) FlushBuffer added in v1.4.0

FlushBuffer flushes any existing objects to the backend

func (*StreamingBlock) Length added in v1.4.0

func (c *StreamingBlock) Length() int

type WriterPool added in v1.2.0

type WriterPool interface {
	GetWriter(io.Writer) (io.WriteCloser, error)
	PutWriter(io.WriteCloser)
	ResetWriter(dst io.Writer, resetWriter io.WriteCloser) (io.WriteCloser, error)
	Encoding() backend.Encoding
}

WriterPool is a pool of io.Writer. This is used by every chunk to avoid unnecessary allocations.

func GetWriterPool added in v1.2.0

func GetWriterPool(enc backend.Encoding) (WriterPool, error)

type ZstdPool added in v1.2.0

type ZstdPool struct {
}

ZstdPool is a zstd compression pool

func (*ZstdPool) Encoding added in v1.2.0

func (pool *ZstdPool) Encoding() backend.Encoding

Encoding implements WriterPool and ReaderPool

func (*ZstdPool) GetReader added in v1.2.0

func (pool *ZstdPool) GetReader(src io.Reader) (io.Reader, error)

GetReader gets or creates a new CompressionReader and reset it to read from src

func (*ZstdPool) GetWriter added in v1.2.0

func (pool *ZstdPool) GetWriter(dst io.Writer) (io.WriteCloser, error)

GetWriter gets or creates a new CompressionWriter and reset it to write to dst

func (*ZstdPool) PutReader added in v1.2.0

func (pool *ZstdPool) PutReader(reader io.Reader)

PutReader places back in the pool a CompressionReader

func (*ZstdPool) PutWriter added in v1.2.0

func (pool *ZstdPool) PutWriter(writer io.WriteCloser)

PutWriter places back in the pool a CompressionWriter

func (*ZstdPool) ResetReader added in v1.2.0

func (pool *ZstdPool) ResetReader(src io.Reader, resetReader io.Reader) (io.Reader, error)

ResetReader implements ReaderPool

func (*ZstdPool) ResetWriter added in v1.2.0

func (pool *ZstdPool) ResetWriter(dst io.Writer, resetWriter io.WriteCloser) (io.WriteCloser, error)

ResetWriter implements WriterPool

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL