symdb

package
v1.4.0 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Feb 6, 2024 License: AGPL-3.0 Imports: 40 Imported by: 0

Documentation

Index

Constants

View Source
const (
	DefaultDirName = "symbols"

	IndexFileName       = "index.symdb"
	StacktracesFileName = "stacktraces.symdb"
)
View Source
const (
	FormatV1
	FormatV2
)
View Source
const HeaderSize = int(unsafe.Sizeof(Header{}))

Variables

View Source
var (
	ErrInvalidSize    = &FormatError{fmt.Errorf("invalid size")}
	ErrInvalidCRC     = &FormatError{fmt.Errorf("invalid CRC")}
	ErrInvalidMagic   = &FormatError{fmt.Errorf("invalid magic number")}
	ErrUnknownVersion = &FormatError{fmt.Errorf("unknown version")}
)
View Source
var ErrInvalidStacktraceRange = fmt.Errorf("invalid range: stack traces can't be resolved")
View Source
var ErrPartitionNotFound = fmt.Errorf("partition not found")

Functions

This section is empty.

Types

type CallSiteValues added in v1.4.0

type CallSiteValues struct {
	// Flat is the sum of sample values directly attributed to the node.
	Flat uint64
	// Total is the total sum of sample values attributed to the node and
	// its descendants.
	Total uint64
	// LocationFlat is the sum of sample values directly attributed to the
	// node location, irrespective of the call chain.
	LocationFlat uint64
	// LocationTotal is the total sum of sample values attributed to the
	// node location and its descendants, irrespective of the call chain.
	LocationTotal uint64
}

CallSiteValues represents statistics associated with a call tree node.

type ChunkEncoding

type ChunkEncoding byte
const (
	ChunkEncodingGroupVarint ChunkEncoding
)

type Config

type Config struct {
	Dir         string
	Stacktraces StacktracesConfig
	Parquet     ParquetConfig
}

func DefaultConfig

func DefaultConfig() *Config

func (*Config) WithDirectory

func (c *Config) WithDirectory(dir string) *Config

func (*Config) WithParquetConfig

func (c *Config) WithParquetConfig(pc ParquetConfig) *Config

type FormatError

type FormatError struct {
	// contains filtered or unexported fields
}

func (*FormatError) Error

func (e *FormatError) Error() string
type Header struct {
	Magic    [4]byte
	Version  uint32
	Reserved [8]byte // Reserved for future use.
}

func (*Header) MarshalBinary

func (h *Header) MarshalBinary() ([]byte, error)

func (*Header) UnmarshalBinary

func (h *Header) UnmarshalBinary(b []byte) error

type Helper

type Helper[M schemav1.Models, K comparable] interface {
	// contains filtered or unexported methods
}

type IndexFile

type IndexFile struct {
	Header Header
	TOC    TOC

	// Version-specific parts.
	PartitionHeaders PartitionHeaders

	CRC uint32
}

func ReadIndexFile

func ReadIndexFile(b []byte) (f IndexFile, err error)

func (*IndexFile) WriteTo

func (f *IndexFile) WriteTo(dst io.Writer) (n int64, err error)

type MemoryStats

type MemoryStats struct {
	StacktracesSize uint64
	LocationsSize   uint64
	MappingsSize    uint64
	FunctionsSize   uint64
	StringsSize     uint64
}

func (*MemoryStats) MemorySize

func (m *MemoryStats) MemorySize() uint64

type ParquetConfig

type ParquetConfig struct {
	MaxBufferRowCount int
}

type PartitionHeader

type PartitionHeader struct {
	Partition uint64

	StacktraceChunks []StacktraceChunkHeader
	Locations        []RowRangeReference
	Mappings         []RowRangeReference
	Functions        []RowRangeReference
	Strings          []RowRangeReference
}

func (*PartitionHeader) Size

func (h *PartitionHeader) Size() int64

type PartitionHeaders

type PartitionHeaders []*PartitionHeader

func (*PartitionHeaders) Size

func (h *PartitionHeaders) Size() int64

func (*PartitionHeaders) Unmarshal

func (h *PartitionHeaders) Unmarshal(b []byte) error

func (*PartitionHeaders) WriteTo

func (h *PartitionHeaders) WriteTo(dst io.Writer) (_ int64, err error)

type PartitionReader

type PartitionReader interface {
	WriteStats(s *PartitionStats)
	Symbols() *Symbols
	Release()
}

type PartitionStats

type PartitionStats struct {
	StacktracesTotal int
	MaxStacktraceID  int
	LocationsTotal   int
	MappingsTotal    int
	FunctionsTotal   int
	StringsTotal     int
}

type PartitionWriter

type PartitionWriter struct {
	// contains filtered or unexported fields
}

func (*PartitionWriter) AppendFunctions

func (p *PartitionWriter) AppendFunctions(dst []uint32, functions []*schemav1.InMemoryFunction)

func (*PartitionWriter) AppendLocations

func (p *PartitionWriter) AppendLocations(dst []uint32, locations []*schemav1.InMemoryLocation)

func (*PartitionWriter) AppendMappings

func (p *PartitionWriter) AppendMappings(dst []uint32, mappings []*schemav1.InMemoryMapping)

func (*PartitionWriter) AppendStacktraces

func (p *PartitionWriter) AppendStacktraces(dst []uint32, s []*schemav1.Stacktrace)

func (*PartitionWriter) AppendStrings

func (p *PartitionWriter) AppendStrings(dst []uint32, strings []string)

func (*PartitionWriter) LookupLocations added in v1.2.1

func (p *PartitionWriter) LookupLocations(dst []uint64, stacktraceID uint32) []uint64

func (*PartitionWriter) Release

func (p *PartitionWriter) Release()

func (*PartitionWriter) ResolveChunk

func (p *PartitionWriter) ResolveChunk(dst StacktraceInserter, sr StacktracesRange) error

func (*PartitionWriter) ResolveStacktraceLocations

func (p *PartitionWriter) ResolveStacktraceLocations(_ context.Context, dst StacktraceInserter, stacktraces []uint32) error

func (*PartitionWriter) Symbols

func (p *PartitionWriter) Symbols() *Symbols

func (*PartitionWriter) WriteProfileSymbols

func (p *PartitionWriter) WriteProfileSymbols(profile *profilev1.Profile) []schemav1.InMemoryProfile

func (*PartitionWriter) WriteStats

func (p *PartitionWriter) WriteStats(s *PartitionStats)

type Reader

type Reader struct {
	// contains filtered or unexported fields
}

func Open

func (*Reader) Close

func (r *Reader) Close() error

func (*Reader) Load

func (r *Reader) Load(ctx context.Context) error

Load loads all the partitions into memory. Partitions are kept in memory during the whole lifetime of the Reader object.

The main user of the function is Rewriter: it is not known in advance which partitions will be fetched, but it is known that all or most of them will be requested, so preloading is more efficient, although it consumes more memory.

func (*Reader) Partition

func (r *Reader) Partition(ctx context.Context, partition uint64) (PartitionReader, error)

type Resolver

type Resolver struct {
	// contains filtered or unexported fields
}

Resolver converts stack trace samples to one of the profile formats, such as tree or pprof.

Resolver asynchronously loads symbols for each partition as they are added with AddSamples or Partition calls.

A new Resolver must be created for each profile.

func NewResolver

func NewResolver(ctx context.Context, s SymbolsReader, opts ...ResolverOption) *Resolver

func (*Resolver) AddSamples

func (r *Resolver) AddSamples(partition uint64, s schemav1.Samples)

AddSamples adds a collection of stack trace samples to the resolver. Samples can be added to partitions concurrently.

func (*Resolver) AddSamplesFromParquetRow added in v1.3.0

func (r *Resolver) AddSamplesFromParquetRow(partition uint64, stacktraceIDs, values []parquet.Value)

func (*Resolver) AddSamplesWithSpanSelector added in v1.2.0

func (r *Resolver) AddSamplesWithSpanSelector(partition uint64, s schemav1.Samples, spanSelector model.SpanSelector)

func (*Resolver) CallSiteValues added in v1.4.0

func (r *Resolver) CallSiteValues(values *CallSiteValues, partition uint64, samples schemav1.Samples) error

func (*Resolver) CallSiteValuesParquet added in v1.4.0

func (r *Resolver) CallSiteValuesParquet(values *CallSiteValues, partition uint64, stacktraceID, value []parquet.Value) error

func (*Resolver) Pprof added in v1.2.1

func (r *Resolver) Pprof() (*googlev1.Profile, error)

func (*Resolver) Release

func (r *Resolver) Release()

func (*Resolver) Tree

func (r *Resolver) Tree() (*model.Tree, error)

func (*Resolver) WithPartitionSamples added in v1.3.0

func (r *Resolver) WithPartitionSamples(partition uint64, fn func(map[uint32]int64))

type ResolverOption

type ResolverOption func(*Resolver)

func WithResolverMaxConcurrent added in v1.3.0

func WithResolverMaxConcurrent(n int) ResolverOption

WithResolverMaxConcurrent specifies how many partitions can be resolved concurrently.

func WithResolverMaxNodes added in v1.3.0

func WithResolverMaxNodes(n int64) ResolverOption

WithResolverMaxNodes specifies the desired maximum number of nodes the resulting profile should include.

func WithResolverStackTraceSelector added in v1.3.0

func WithResolverStackTraceSelector(sts *typesv1.StackTraceSelector) ResolverOption

WithResolverStackTraceSelector specifies the stack trace selector. Only stack traces that belong to the callSite (have the prefix provided) will be selected. If empty, the filter is ignored. Subtree root location is the last element.

type Rewriter

type Rewriter struct {
	// contains filtered or unexported fields
}

func NewRewriter

func NewRewriter(w *SymDB, r SymbolsReader) *Rewriter

func (*Rewriter) Rewrite

func (r *Rewriter) Rewrite(partition uint64, stacktraces []uint32) error

type RowRangeReference

type RowRangeReference struct {
	RowGroup uint32
	Index    uint32
	Rows     uint32
}

type SelectedStackTraces added in v1.4.0

type SelectedStackTraces struct {
	// contains filtered or unexported fields
}

func SelectStackTraces added in v1.4.0

func SelectStackTraces(symbols *Symbols, selector *typesv1.StackTraceSelector) *SelectedStackTraces

func (*SelectedStackTraces) CallSiteValues added in v1.4.0

func (x *SelectedStackTraces) CallSiteValues(values *CallSiteValues, samples schemav1.Samples)

CallSiteValues writes the call site statistics for the selected stack traces and the given set of samples.

func (*SelectedStackTraces) CallSiteValuesParquet added in v1.4.0

func (x *SelectedStackTraces) CallSiteValuesParquet(values *CallSiteValues, stacktraceID, value []parquet.Value)

CallSiteValuesParquet is identical to CallSiteValues but accepts raw parquet values instead of samples.

func (*SelectedStackTraces) IsValid added in v1.4.0

func (x *SelectedStackTraces) IsValid() bool

IsValid reports whether any stack traces match the selector. An empty selector results in a valid empty selection.

type StacktraceChunkHeader

type StacktraceChunkHeader struct {
	Offset int64
	Size   int64

	Partition     uint64
	ChunkIndex    uint16
	ChunkEncoding ChunkEncoding

	Stacktraces        uint32 // Number of unique stack traces in the chunk.
	StacktraceNodes    uint32 // Number of nodes in the stacktrace tree.
	StacktraceMaxDepth uint32 // Max stack trace depth in the tree.
	StacktraceMaxNodes uint32 // Max number of nodes at the time of the chunk creation.

	CRC uint32 // Checksum of the chunk data [Offset:Size).
	// contains filtered or unexported fields
}

type StacktraceInserter

type StacktraceInserter interface {
	InsertStacktrace(stacktraceID uint32, locations []int32)
}

StacktraceInserter accepts resolved locations for a given stack trace. The leaf is at locations[0].

Locations slice must not be retained by the implementation. It is guaranteed that for a given stacktrace ID, InsertStacktrace is called no more than once.

type StacktraceResolver

type StacktraceResolver interface {
	// ResolveStacktraceLocations resolves locations for each stack
	// trace and inserts it to the StacktraceInserter provided.
	//
	// The stacktraces must be ordered in ascending order.
	// If a stacktrace can't be resolved, dst receives an empty
	// array of locations.
	//
	// Stacktraces slice might be modified during the call.
	ResolveStacktraceLocations(ctx context.Context, dst StacktraceInserter, stacktraces []uint32) error
	LookupLocations(dst []uint64, stacktraceID uint32) []uint64
}

type StacktracesConfig

type StacktracesConfig struct {
	MaxNodesPerChunk uint32
}

type StacktracesRange

type StacktracesRange struct {
	// contains filtered or unexported fields
}

func SplitStacktraces

func SplitStacktraces(s []uint32, n uint32) []StacktracesRange

SplitStacktraces splits the range of stack trace IDs by limit n into sub-ranges matching to the corresponding chunks and shifts the values accordingly. Note that the input s is modified in place.

Stack trace ID 0 is reserved and is not expected in the input. Stack trace IDs for which ID % max_nodes == 0 are not expected either.

type SymDB

type SymDB struct {
	// contains filtered or unexported fields
}

func NewSymDB

func NewSymDB(c *Config) *SymDB

func (*SymDB) Files

func (s *SymDB) Files() []block.File

func (*SymDB) Flush

func (s *SymDB) Flush() error

func (*SymDB) Load added in v1.2.0

func (s *SymDB) Load(context.Context) error

func (*SymDB) MemorySize

func (s *SymDB) MemorySize() uint64

func (*SymDB) Partition

func (s *SymDB) Partition(_ context.Context, partition uint64) (PartitionReader, error)

func (*SymDB) PartitionWriter

func (s *SymDB) PartitionWriter(partition uint64) *PartitionWriter

func (*SymDB) WriteMemoryStats

func (s *SymDB) WriteMemoryStats(m *MemoryStats)

func (*SymDB) WriteProfileSymbols

func (s *SymDB) WriteProfileSymbols(partition uint64, profile *profilev1.Profile) []schemav1.InMemoryProfile

type Symbols

type Symbols struct {
	Stacktraces StacktraceResolver
	Locations   []*schemav1.InMemoryLocation
	Mappings    []*schemav1.InMemoryMapping
	Functions   []*schemav1.InMemoryFunction
	Strings     []string
}

func (*Symbols) Pprof added in v1.2.1

func (r *Symbols) Pprof(
	ctx context.Context,
	samples schemav1.Samples,
	maxNodes int64,
	selection *SelectedStackTraces,
) (*googlev1.Profile, error)

func (*Symbols) Tree

func (r *Symbols) Tree(ctx context.Context, samples schemav1.Samples) (*model.Tree, error)

type SymbolsReader

type SymbolsReader interface {
	Partition(ctx context.Context, partition uint64) (PartitionReader, error)
	Load(context.Context) error
}

SymbolsReader provides access to a symdb partition.

type TOC

type TOC struct {
	Entries []TOCEntry
}

func (*TOC) MarshalBinary

func (toc *TOC) MarshalBinary() ([]byte, error)

func (*TOC) Size

func (toc *TOC) Size() int

func (*TOC) UnmarshalBinary

func (toc *TOC) UnmarshalBinary(b []byte) error

type TOCEntry

type TOCEntry struct {
	Offset int64
	Size   int64
}

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL