optimize

Published: Jul 1, 2022 License: MIT Imports: 10 Imported by: 0


Constants

This section is empty.

Variables

This section is empty.

Functions

func Bissection

func Bissection(a, b, tol float64, f func(float64) float64, logger *log.Logger) (float64, error)

Bissection finds a zero of f using the bisection method. logger may be nil

Example
// We look for a root of f(x) = (x + 3)(x − 1)²
f := func(x float64) float64 {
	xless1 := x - 1
	y := (x + 3) * xless1 * xless1
	return y
}
// We take [a0, b0] = [−4, 4/3]
a, b := -4.0, 4./3.
_, err := Bissection(a, b, 1e-9, f, log.New(os.Stdout, "", 0))
if err != nil {
	panic(err)
}
Output:

0 a,fa=-4, -25 b,fb=1.3333,0.48148
1 a,fa=-4, -25 b,fb=-1.3333,9.0741
2 a,fa=-4, -25 b,fb=-2.6667,4.4815
3 a,fa=-3.3333, -6.2593 b,fb=-2.6667,4.4815
4 a,fa=-2.6667, 4.4815 b,fb=-3,0

func Brent

func Brent(a, b, tol float64, f func(float64) float64, logger *log.Logger) (float64, error)

Brent finds a zero of f using Brent's method; see https://en.wikipedia.org/wiki/Brent%27s_method. logger may be nil

Example
// We look for a root of f(x) = (x + 3)(x − 1)²
f := func(x float64) float64 {
	xless1 := x - 1
	y := (x + 3) * xless1 * xless1
	return y
}
// We take [a0, b0] = [−4, 4/3]
a, b := -4.0, 4./3.
_, err := Brent(a, b, 1e-9, f, log.New(os.Stdout, "", 0))
if err != nil {
	fmt.Println(err.Error())
}
Output:

func Gss

func Gss(f func(float64) float64, a, b, tol float64, logger *log.Logger) (float64, float64)

Gss is a recursive golden-section search; see https://en.wikipedia.org/wiki/Golden-section_search. Given a function f with a single local minimum in the interval [a,b], Gss returns a subset interval [c,d] that contains the minimum with d-c <= tol.

logger may be nil

From the original Python docstring:

>>> f = lambda x: (x-2)**2
>>> a = 1
>>> b = 5
>>> tol = 1e-5
>>> (c,d) = gssrec(f, a, b, tol)
>>> print (c,d)
(1.9999959837979107, 2.0000050911830893)

Example
f := func(x float64) float64 {
	tmp := x - 2
	return tmp * tmp
}
logger := log.New(os.Stdout, "", 0)
Gss(f, 1, 5, 1e-6, logger)
Output:

0	        1	        5
1	        1	  3.47214
2	        1	  2.52786
3	  1.58359	  2.52786
4	  1.58359	  2.16718
5	   1.8065	  2.16718
6	  1.94427	  2.16718
7	  1.94427	  2.08204
8	  1.94427	  2.02942
9	  1.97679	  2.02942
10	  1.97679	  2.00932
11	  1.98922	  2.00932
12	  1.99689	  2.00932
13	  1.99689	  2.00457
14	  1.99689	  2.00164
15	  1.99871	  2.00164
16	  1.99871	  2.00052
17	   1.9994	  2.00052
18	  1.99983	  2.00052
19	  1.99983	  2.00025
20	  1.99983	  2.00009
21	  1.99993	  2.00009
22	  1.99993	  2.00003
23	  1.99997	  2.00003
24	  1.99999	  2.00003
25	  1.99999	  2.00001
26	  1.99999	  2.00001
27	        2	  2.00001
28	        2	        2
29	        2	        2
30	        2	        2
31	        2	        2
32	        2	        2

Types

type BrentMinimizer

type BrentMinimizer struct {
	Func    func(float64) float64
	Tol     float64
	Maxiter int

	Xmin           float64
	Fval           float64
	Iter, Funcalls int
	Brack          []float64

	FnMaxFev func(int) bool
	// contains filtered or unexported fields
}

BrentMinimizer is a translation of the Brent class in scipy/optimize/optimize.py. It uses inverse parabolic interpolation when possible to speed up convergence of the golden-section method.

Example
f := func(x float64) float64 { return x * x }
tol := 1e-8
maxIter := 500
fnMaxFev := func(nfev int) bool { return nfev > 1500 }
bm := NewBrentMinimizer(f, tol, maxIter, fnMaxFev)
bm.Brack = []float64{1, 2}
x, fx, nIter, nFev := bm.Optimize()
fmt.Printf("x: %.8g, fx: %.8g, nIter: %d, nFev: %d\n", x, fx, nIter, nFev)

bm.Brack = []float64{-1, 0.5, 2}
x, fx, nIter, nFev = bm.Optimize()
fmt.Printf("x: %.8g, fx: %.8g, nIter: %d, nFev: %d\n", x, fx, nIter, nFev)
Output:

x: 0, fx: 0, nIter: 4, nFev: 9
x: -2.7755576e-17, fx: 7.7037198e-34, nIter: 5, nFev: 9

func NewBrentMinimizer

func NewBrentMinimizer(fun func(float64) float64, tol float64, maxiter int, fnMaxFev func(int) bool) *BrentMinimizer

NewBrentMinimizer returns an initialized *BrentMinimizer

func (*BrentMinimizer) Optimize

func (bm *BrentMinimizer) Optimize() (x, fx float64, iter, funcalls int)

Optimize searches for the value of x minimizing bm.Func

func (*BrentMinimizer) SetBracket

func (bm *BrentMinimizer) SetBracket(brack []float64)

SetBracket sets the initial bracket of the BrentMinimizer. len(brack) must be between 1 and 3 inclusive.
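
A minimal usage sketch (the quadratic objective and bracket points here are illustrative, not from the package; since BrentMinimizer is translated from scipy, the 3-point form is assumed to follow scipy's (xa, xb, xc) convention, with f(xb) below the values at both endpoints):

f := func(x float64) float64 { return (x - 3) * (x - 3) }
bm := NewBrentMinimizer(f, 1e-8, 500, func(nfev int) bool { return nfev > 1500 })
// 3-point bracket: f(2) is lower than f(0) and f(5), so a minimum lies inside.
bm.SetBracket([]float64{0, 2, 5})
x, fx, _, _ := bm.Optimize()
fmt.Printf("x: %.5g, fx: %.5g\n", x, fx)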

type CmaEsCholB

type CmaEsCholB struct {
	//optimize.CmaEsChol
	// InitStepSize sets the initial size of the covariance matrix adaptation.
	// If InitStepSize is 0, a default value of 0.5 is used. InitStepSize cannot
	// be negative, or CmaEsCholB will panic.
	InitStepSize float64
	// Population sets the population size for the algorithm. If Population is
	// 0, a default value of 4 + math.Floor(3*math.Log(float64(dim))) is used.
	// Population cannot be negative or CmaEsCholB will panic.
	Population int
	// InitCholesky specifies the Cholesky decomposition of the covariance
	// matrix for the initial sampling distribution. If InitCholesky is nil,
	// a default value of I is used. If it is non-nil, then it must have
	// InitCholesky.Size() be equal to the problem dimension.
	InitCholesky *mat.Cholesky
	// StopLogDet sets the threshold for stopping the optimization if the
	// distribution becomes too peaked. The log determinant is a measure of the
	// (log) "volume" of the normal distribution, and when it is too small
	// the samples are almost the same. If the log determinant of the covariance
	// matrix becomes less than StopLogDet, the optimization run is concluded.
	// If StopLogDet is 0, a default value of dim*log(1e-16) is used.
	// If StopLogDet is NaN, the stopping criterion is not used, though
	// this can cause numeric instabilities in the algorithm.
	StopLogDet float64
	// ForgetBest, when true, does not track the best overall function value found,
	// instead returning the new best sample in each iteration. If ForgetBest
	// is false, then the minimum value returned will be the lowest across all
	// iterations, regardless of when that sample was generated.
	ForgetBest bool
	// Src allows a random number generator to be supplied for generating samples.
	// If Src is nil the global generator in golang.org/x/exp/rand is used.
	Src rand.Source

	// Xmin, Xmax are optional lower and upper bounds on x.
	Xmin, Xmax []float64
	// contains filtered or unexported fields
}

CmaEsCholB is optimize.CmaEsChol with Xmin, Xmax bound constraints; only sendTask and ensureBounds differ from the original.

Example
problem := optimize.Problem{
	Func: func(x []float64) float64 {
		return x[0]*x[0] + x[1]*x[1]
	},
}
initX := []float64{1, 1}
method := &CmaEsCholB{Xmin: []float64{.1, math.Inf(-1)}}
method.Src = rand.NewSource(uint64(1))
settings := &optimize.Settings{FuncEvaluations: 500}

res, err := optimize.Minimize(problem, initX, settings, method)
if err != nil {
	panic(err)
}
//fmt.Printf("%#v\n", res)
if math.Abs(res.Location.X[0]-.1) > 1e-2 || math.Abs(res.Location.X[1]-.0) > 1e-2 {
	fmt.Printf("%.5f", res.Location.X)

}
Output:

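For reference, a hedged sketch setting more of the exported fields at once (all values are illustrative; see the field comments above for defaults and constraints):

method := &CmaEsCholB{
	InitStepSize: 0.3,
	Population:   16,
	Xmin:         []float64{-5, -5},
	Xmax:         []float64{5, 5},
	Src:          rand.NewSource(uint64(42)),
}
// method can then be passed to optimize.Minimize as in the example above.
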
func (*CmaEsCholB) Init

func (cma *CmaEsCholB) Init(dim, tasks int) int

Init implements gonum's optimize.Method.

func (*CmaEsCholB) Needs

func (cma *CmaEsCholB) Needs() struct{ Gradient, Hessian bool }

Needs reports whether the method needs the gradient and the Hessian of the objective function.

func (*CmaEsCholB) Run

func (cma *CmaEsCholB) Run(operations chan<- optimize.Task, results <-chan optimize.Task, tasks []optimize.Task)

Run implements gonum's optimize.Method.

func (*CmaEsCholB) Status

func (cma *CmaEsCholB) Status() (optimize.Status, error)

Status returns the status of the method.

func (*CmaEsCholB) Uses

func (cma *CmaEsCholB) Uses(has optimize.Available) (optimize.Available, error)

Uses implements gonum's optimize.Method.

type Powell

type Powell struct {
	PM *PowellMinimizer
	// contains filtered or unexported fields
}

Powell wraps a PowellMinimizer so that the modified Powell algorithm can be used as a gonum optimize.Method.

func (*Powell) Init

func (g *Powell) Init(dim, tasks int) int

Init implements gonum's optimize.Method for Powell.

func (*Powell) Run

func (g *Powell) Run(operation chan<- optimize.Task, result <-chan optimize.Task, tasks []optimize.Task)

Run implements gonum's optimize.Method for Powell.

Example
settings := &optimize.Settings{
	//MajorIterations: 50,
	//FuncEvaluations: 50,
	//Recorder:        optimize.NewPrinter(),
}
method := &Powell{}
res, err := optimize.Minimize(optimize.Problem{
	Func: func(x []float64) float64 { return 1 - math.Exp(1/(1+x[0]*x[0]+x[1]*x[1]))/math.E },
}, []float64{10, 20}, settings, method)
if err != nil {
	panic(err)
}
fmt.Printf("%s %.5f\n", res.Status, res.X)
Output:

MethodConverge [-0.00033 -0.00317]

func (*Powell) Status

func (g *Powell) Status() (optimize.Status, error)

Status returns the status of the method.

func (*Powell) Uses

func (g *Powell) Uses(has optimize.Available) (optimize.Available, error)

Uses implements gonum's optimize.Method for Powell.

type PowellMinimizer

type PowellMinimizer struct {
	Callback        func([]float64)
	Xtol, Ftol      float64
	MaxIter, MaxFev int
	Logger          *log.Logger
}

PowellMinimizer minimizes a scalar function of a multidimensional x using the modified Powell algorithm (see fmin_powell in scipy.optimize)

Example
pm := NewPowellMinimizer()
pm.Callback = func(x []float64) {
	fmt.Printf("%.5f\n", x)
}
pm.Logger = log.New(os.Stdout, "", 0)

pm.Minimize(
	func(x []float64) float64 { return -math.Exp(1 / (1 + x[0]*x[0] + x[1]*x[1])) },
	[]float64{10, 20},
)
Output:

[-0.02748 -0.02037]
[0.00818 -0.00407]
[0.00154 -0.00337]
Success. Current function value: -2.718245 Iterations: 3 Function evaluations: 69

func NewPowellMinimizer

func NewPowellMinimizer() (pm *PowellMinimizer)

NewPowellMinimizer returns a *PowellMinimizer with default tolerances
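
The defaults can be overridden through the exported fields before calling Minimize. A minimal sketch (the tolerance and budget values are illustrative, not the package defaults):

pm := NewPowellMinimizer()
pm.Xtol, pm.Ftol = 1e-6, 1e-6     // convergence tolerances on x and on f
pm.MaxIter, pm.MaxFev = 200, 2000 // iteration and function-evaluation budgets
pm.Minimize(
	func(x []float64) float64 { return x[0]*x[0] + x[1]*x[1] },
	[]float64{3, 4},
)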

func (*PowellMinimizer) Minimize

func (pm *PowellMinimizer) Minimize(f func([]float64) float64, x0 []float64)

Minimize minimizes f starting at x0
