Documentation ¶
Index ¶
Constants ¶
This section is empty.
Variables ¶
var Wrap = errors.Wrap
Functions ¶
func Ret ¶
func Ret[In any, W comparable, PT interface { *In M(in In) W }](in In) W
Types ¶
type BenchmarkFunc ¶
type BenchmarkFunc[In any, W comparable] struct { NameFunc string Fn func(In) W }
type BenchmarkInput ¶
type BenchmarkInput[In any, W comparable] struct { Name string In In }
type BenchmarkTable ¶
type BenchmarkTable[In any, W comparable] struct { Name string Return bool Global bool Funcs []BenchmarkFunc[In, W] Inputs []BenchmarkInput[In, W] }
func (*BenchmarkTable[In, W]) AddFuncs ¶
func (tbl *BenchmarkTable[In, W]) AddFuncs(funcs []BenchmarkFunc[In, W])
func (*BenchmarkTable[In, W]) AddInputs ¶
func (tbl *BenchmarkTable[In, W]) AddInputs(inputs []BenchmarkInput[In, W])
func (*BenchmarkTable[In, W]) Run ¶
func (tbl *BenchmarkTable[In, W]) Run(b *testing.B)
type BenchmarkTableEntry ¶
type BenchmarkTableEntry[G any, W comparable] struct { Name string Fn func(in ...G) W In []G Want W WantErr bool }
func (*BenchmarkTableEntry[G, W]) Run ¶
func (bte *BenchmarkTableEntry[G, W]) Run(b *testing.B) error
func (bte *BenchmarkTableEntry[G, W]) Name() string { return bte.name } func (bte *BenchmarkTableEntry[G, W]) In() []G { return bte.in } func (bte *BenchmarkTableEntry[G, W]) Want() W { return *new(W) } func (bte *BenchmarkTableEntry[G, W]) Got() W { return *new(W) } func (bte *BenchmarkTableEntry[G, W]) WantErr() bool { return false }
type Runner ¶
func MakeBenchmarkRunner ¶
func MakeBenchmarkRunner[In any, W comparable]( name string, Return, Global bool, funcs []BenchmarkFunc[In, W], inputs []BenchmarkInput[In, W], ) Runner
MakeBenchmarkRunner returns an interface that can be Run() to perform all benchmarks comparing several implementations of varying forms of an algorithm.
The goal, or incentive, for each can be set up to be speed, accuracy, throughput, memory usage, etc.
The function takes a name, func, and slice of data and adds them to a table. The entire set is returned as a Runner ready for immediate use in benchmarking.
The anonymous table is not accessible for any other functionality.
type TestDataDetails ¶
type TestDataDetails[In any, W comparable] struct { Name string In In Want W WantErr bool }
type TestDataType ¶
type TestDataType[In any, W comparable] struct { NameFunc string Fn func(In) W TestDataDetails[In, W] }
func (*TestDataType[In, W]) Benchmark ¶
func (tt *TestDataType[In, W]) Benchmark(b *testing.B) (err error)
func (*TestDataType[In, W]) Got ¶
func (tt *TestDataType[In, W]) Got() W
type TestRunner ¶
func MakeTestRunner ¶
func MakeTestRunner[In any, W comparable]( name string, fn func(In) W, testdata []TestDataDetails[In, W], ) TestRunner
MakeTestRunner returns an interface that can be Run() to perform all tests on a single function.
The function takes a name, func, and slice of data and adds them to a test table. The entire set is returned as a TestRunner ready for immediate use in testing.
The function name and fn are used for the entire set. Each of the items in testdata (In, Want, and WantErr) is used as the input, expected output, and error-expected flag in a separate test.
The anonymous test table is not accessible for any other functionality.
type TestTable ¶
type TestTable[In any, W comparable] struct { Name string Tests []TestDataType[In, W] }
func NewTestTable ¶
func NewTestTable[G any, W comparable](name string) *TestTable[G, W]
func (*TestTable[In, W]) Add ¶
func (tbl *TestTable[In, W]) Add(entry TestDataType[In, W])
func (*TestTable[In, W]) AddSet ¶
func (tbl *TestTable[In, W]) AddSet(name string, fn func(In) W, entries []TestDataDetails[In, W]) TestRunner