benchmark_runner

package
v0.3.5 Latest Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Mar 31, 2023 License: MIT Imports: 18 Imported by: 0

Documentation

Index

Constants

View Source
const (
	CurrentResultFormatVersion = "0.1"

	// WorkerPerQueue is the value for assigning each worker its own queue of batches
	WorkerPerQueue = 0
	// SingleQueue is the value for using a single shared queue across all workers
	SingleQueue = 1
	Inf         = rate.Limit(math.MaxFloat64)
)

Variables

This section is empty.

Functions

This section is empty.

Types

type Batch

type Batch interface {
	Len() int
	Append(*DocHolder)
}

Batch is an aggregate of points for a particular database system. It needs to have a way to measure its size to make sure it does not get too large, and it needs a way to append a point.

type BatchFactory

type BatchFactory interface {
	// New returns a new Batch to add Points to
	New() Batch
}

BatchFactory returns a new empty batch for storing points.

type Benchmark

type Benchmark interface {
	// GetCmdDecoder returns the DocDecoder to use for this Benchmark
	GetCmdDecoder(br *bufio.Reader) DocDecoder

	// GetBatchFactory returns the BatchFactory to use for this Benchmark
	GetBatchFactory() BatchFactory

	// GetCommandIndexer returns the DocIndexer to use for this Benchmark
	GetCommandIndexer(maxPartitions uint) DocIndexer

	// GetProcessor returns the Processor to use for this Benchmark
	GetProcessor() Processor

	// GetConfigurationParametersMap returns the map of specific configurations used in the benchmark
	GetConfigurationParametersMap() map[string]interface{}
}

Benchmark is an interface that represents the skeleton of a program needed to run an insert or load benchmark.

type BenchmarkRunner

type BenchmarkRunner struct {
	// flag fields
	JsonOutFile string
	Metadata    string
	// contains filtered or unexported fields
}

BenchmarkRunner is responsible for initializing and storing common flags across all database systems and ultimately running a supplied Benchmark

func GetBenchmarkRunner

func GetBenchmarkRunner() *BenchmarkRunner

GetBenchmarkRunner returns the singleton BenchmarkRunner for use in a benchmark program with a default batch size

func GetBenchmarkRunnerWithBatchSize

func GetBenchmarkRunnerWithBatchSize(batchSize uint) *BenchmarkRunner

GetBenchmarkRunnerWithBatchSize returns the singleton BenchmarkRunner for use in a benchmark program with specified batch size.

func (*BenchmarkRunner) GetBufferedReader

func (l *BenchmarkRunner) GetBufferedReader() *bufio.Reader

GetBufferedReader returns the buffered Reader that should be used by the loader

func (*BenchmarkRunner) GetMeasuredRatiosMap

func (b *BenchmarkRunner) GetMeasuredRatiosMap() map[string]interface{}

func (*BenchmarkRunner) GetOverallQuantiles

func (b *BenchmarkRunner) GetOverallQuantiles() map[string]interface{}

func (*BenchmarkRunner) GetOverallRatesMap

func (l *BenchmarkRunner) GetOverallRatesMap() map[string]interface{}

func (*BenchmarkRunner) GetPerSecondEncodedHistogramsMap added in v0.3.1

func (b *BenchmarkRunner) GetPerSecondEncodedHistogramsMap() map[uint64]string

func (*BenchmarkRunner) GetTimeSeriesMap

func (b *BenchmarkRunner) GetTimeSeriesMap() map[string]interface{}

func (*BenchmarkRunner) GetTotalsMap

func (b *BenchmarkRunner) GetTotalsMap() map[string]interface{}

func (*BenchmarkRunner) RunBenchmark

func (l *BenchmarkRunner) RunBenchmark(b Benchmark, workQueues uint)

RunBenchmark takes in a Benchmark b, a bufio.Reader br, and holders for number of metrics and rows, and reads those to run the benchmark.

type ByTimestamp

type ByTimestamp []DataPoint

ByTimestamp implements sort.Interface based on the Timestamp field of the DataPoint.

func (ByTimestamp) Len

func (a ByTimestamp) Len() int

func (ByTimestamp) Less

func (a ByTimestamp) Less(i, j int) bool

func (ByTimestamp) Swap

func (a ByTimestamp) Swap(i, j int)

type CmdStat

type CmdStat struct {
	// contains filtered or unexported fields
}

func NewCmdStat

func NewCmdStat(cmdGroup []byte, cmdQueryId []byte, latency uint64, error bool, timedOut bool, rx uint64, tx uint64) *CmdStat

func (*CmdStat) CmdQueryId

func (c *CmdStat) CmdQueryId() []byte

func (*CmdStat) Label

func (c *CmdStat) Label() []byte

func (*CmdStat) Latency

func (c *CmdStat) Latency() uint64

func (*CmdStat) Rx

func (c *CmdStat) Rx() uint64

func (*CmdStat) SetLabel

func (c *CmdStat) SetLabel(label []byte)

func (*CmdStat) SetLatency

func (c *CmdStat) SetLatency(latency uint64)

func (*CmdStat) SetRx

func (c *CmdStat) SetRx(rx uint64)

func (*CmdStat) SetStartTs added in v0.3.1

func (c *CmdStat) SetStartTs(startTs uint64)

func (*CmdStat) SetTx

func (c *CmdStat) SetTx(tx uint64)

func (*CmdStat) StartTs added in v0.3.1

func (c *CmdStat) StartTs() uint64

func (*CmdStat) Tx

func (c *CmdStat) Tx() uint64

type ConstantIndexer

type ConstantIndexer struct{}

ConstantIndexer always puts the item on a single channel. This is the typical use case where all the workers share the same channel

func (*ConstantIndexer) GetIndex

func (i *ConstantIndexer) GetIndex(_ *DocHolder) int

GetIndex returns a constant index (0) regardless of DocHolder

type DataPoint

type DataPoint struct {
	Timestamp   int64              `json:"Timestamp"`
	MultiValues map[string]float64 `json:"MultiValues"`
}

func NewDataPoint

func NewDataPoint(timestamp int64) *DataPoint

func (DataPoint) AddValue

func (p DataPoint) AddValue(s string, value float64)

type DocDecoder

type DocDecoder interface {
	// Decode creates a DocHolder from a data stream
	Decode(*bufio.Reader) *DocHolder
}

DocDecoder decodes the next data point in the process of scanning.

type DocHolder

type DocHolder struct {
	Data interface{}
}

DocHolder acts as a 'holder' for the internal representation of a point in a given benchmark client. Instead of using interface{} as a return type, we get compile safety by using DocHolder

func NewDocument

func NewDocument(data interface{}) *DocHolder

NewDocument creates a Document with the provided data as the internal representation.

type DocIndexer

type DocIndexer interface {
	// GetIndex returns a partition for the given DocHolder
	GetIndex(uint64, *DocHolder) int
}

DocIndexer determines the index of the Batch (and subsequently the channel) that a particular point belongs to

type Processor

type Processor interface {
	// Init does per-worker setup needed before receiving data
	Init(workerNum int, doLoad bool, totalWorkers int)
	// ProcessBatch handles a single batch of data
	ProcessBatch(b Batch, doLoad bool, rateLimiter *rate.Limiter, useRateLimiter bool) Stat
}

Processor is a type that processes the work for a loading worker

type ProcessorCloser

type ProcessorCloser interface {
	Processor
	// Close cleans up after a Processor
	Close(doLoad bool)
}

ProcessorCloser is a Processor that also needs to close or cleanup afterwards

type Stat

type Stat struct {
	// contains filtered or unexported fields
}

Stat represents one statistical measurement, typically used to store the latency of a command

func NewStat

func NewStat() *Stat

func (*Stat) AddCmdStatEntry

func (s *Stat) AddCmdStatEntry(stat CmdStat)

func (*Stat) AddEntry

func (s *Stat) AddEntry(cmdGroup []byte, cmdQueryId []byte, startTs, latencyUs uint64, error bool, timedOut bool, rx, tx uint64) *Stat

func (*Stat) CmdStats

func (s *Stat) CmdStats() []CmdStat

func (*Stat) GetCmdsCount

func (s *Stat) GetCmdsCount() uint64

func (*Stat) Merge

func (s *Stat) Merge(stat Stat)

func (*Stat) SetCmdStats

func (s *Stat) SetCmdStats(cmdStats []CmdStat)

type TestResult

type TestResult struct {

	// Test Configs
	Metadata            string `json:"Metadata"`
	ResultFormatVersion string `json:"ResultFormatVersion"`
	Limit               uint64 `json:"Limit"`
	Workers             uint   `json:"Workers"`
	MaxRps              uint64 `json:"MaxRps"`

	// DB Specific Configs
	DBSpecificConfigs map[string]interface{} `json:"DBSpecificConfigs"`

	StartTime      int64 `json:"StartTime"`
	EndTime        int64 `json:"EndTime"`
	DurationMillis int64 `json:"DurationMillis"`

	// Totals
	Totals map[string]interface{} `json:"Totals"`

	MeasuredRatios map[string]interface{} `json:"MeasuredRatios"`

	// Overall Rates
	OverallRates map[string]interface{} `json:"OverallRates"`

	// Overall Quantiles
	OverallQuantiles map[string]interface{} `json:"OverallQuantiles"`

	// Time-Series
	TimeSeries map[string]interface{} `json:"TimeSeries"`

	PerSecondEncodedHistograms map[uint64]string `json:"PerSecondEncodedHistograms"`
}

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL