nlopt

package module
v0.0.0-...-443d336 Latest Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Feb 19, 2023 License: MIT Imports: 6 Imported by: 3

README

A NLopt implementation for Go

A package to provide functionality of object-oriented C-API of NLopt for the Go programming language (http://golang.org). This provides a wrapper using cgo to a c-based implementation.

Status

Build Status Coverage Status GoDoc

Installation

  • On RedHat/CentOS/Fedora
yum/dnf -y install nlopt-devel
  • On Ubuntu (14.04+)
apt-get install -y libnlopt-dev
  • or, install NLopt library on any Unix-like system (GNU/Linux is fine) with a C compiler, using the standard procedure:
curl -O https://codeload.github.com/stevengj/nlopt/tar.gz/v2.7.0 && tar xzvf v2.7.0 && cd nlopt-2.7.0
cmake . && make && sudo make install

If you use pre-packaged binaries, you might want to either make symlink or a copy of libnlopt-0.dll library file as libnlopt.dll, e.g.:

mklink libnlopt.dll libnlopt-0.dll

If the NLopt library is in a non-standard directory, or you are using Windows, make sure to export the LIBRARY_PATH environment variable, e.g.:

export LIBRARY_PATH=/path/to/NLopt

or, on Windows:

set LIBRARY_PATH=C:\path\to\NLopt

Then install nlopt package.

go get -u github.com/go-nlopt/nlopt

Examples

Implementation of nonlinearly constrained problem from NLopt Tutorial

package main

import (
        "fmt"
        "github.com/go-nlopt/nlopt"
        "math"
)

// Solves the nonlinearly constrained problem from the NLopt tutorial:
// minimize sqrt(x1) subject to (a*x0+b)^3 - x1 <= 0 for two (a, b) pairs,
// using the gradient-based MMA algorithm.
func main() {
        opt, err := nlopt.NewNLopt(nlopt.LD_MMA, 2)
        if err != nil {
                panic(err)
        }
        defer opt.Destroy()

        opt.SetLowerBounds([]float64{math.Inf(-1), 0.})

        var evals int

        // Objective f(x) = sqrt(x[1]); when grad is non-empty it must be
        // filled in-place with the gradient at x.
        objective := func(x, grad []float64) float64 {
                evals++
                if len(grad) > 0 {
                        grad[0] = 0.0
                        grad[1] = 0.5 / math.Sqrt(x[1])
                }
                return math.Sqrt(x[1])
        }

        // Constraint c(x) = (a*x[0]+b)^3 - x[1], required to be <= 0.
        constraint := func(x, grad []float64, a, b float64) float64 {
                if len(grad) > 0 {
                        grad[0] = 3 * a * math.Pow(a*x[0]+b, 2.)
                        grad[1] = -1.0
                }
                return math.Pow(a*x[0]+b, 3) - x[1]
        }

        opt.SetMinObjective(objective)
        opt.AddInequalityConstraint(func(x, grad []float64) float64 {
                return constraint(x, grad, 2., 0.)
        }, 1e-8)
        opt.AddInequalityConstraint(func(x, grad []float64) float64 {
                return constraint(x, grad, -1., 1.)
        }, 1e-8)
        opt.SetXtolRel(1e-4)

        // Initial guess for the two optimization parameters.
        start := []float64{1.234, 5.678}
        xopt, minf, err := opt.Optimize(start)
        if err != nil {
                panic(err)
        }
        fmt.Printf("found minimum after %d evaluations at f(%g,%g) = %0.10g\n", evals, xopt[0], xopt[1], minf)
}

Implementation of Nonlinear Least Squares Without Jacobian

package main

import (
        "fmt"
        "github.com/go-nlopt/nlopt"
        "math"
)

// Nonlinear least squares without an analytic Jacobian: minimize
// chi^2(x) = sum_i f_i(x)^2 with the derivative-free BOBYQA algorithm.
func main() {
        opt, err := nlopt.NewNLopt(nlopt.LN_BOBYQA, 2)
        if err != nil {
                panic(err)
        }
        defer opt.Destroy()

        k := []float64{1., 2., 3., 4., 5., 6., 7., 8., 9., 10.}

        var evals int

        // Sum of squared residuals; grad is ignored since BOBYQA is
        // derivative-free. Residuals are accumulated in index order, so the
        // floating-point result matches a separate build-then-sum pass.
        chiSquared := func(x, grad []float64) float64 {
                evals++
                var chi2 float64
                for i := 0; i < len(k); i++ {
                        r := 2 + 2*k[i] - math.Exp(k[i]*x[0]) - math.Exp(k[i]*x[1])
                        chi2 += r * r
                }
                return chi2
        }

        opt.SetMinObjective(chiSquared)
        opt.SetXtolRel(1e-8)
        opt.SetFtolRel(1e-8)

        // Initial guess.
        x := []float64{0.3, 0.4}
        xopt, resnorm, err := opt.Optimize(x)
        if err != nil {
                panic(err)
        }
        fmt.Printf("BOBYQA: found minimum after %d evaluations at f(%g,%g) = %0.10g\n", evals, xopt[0], xopt[1], resnorm)
}

License

MIT - see LICENSE for more details.

Documentation

Index

Constants

View Source
const (
	// Algorithm identifiers. The declaration order mirrors NLopt's C
	// nlopt_algorithm enum: the values are assigned sequentially via iota,
	// so entries must NOT be reordered, inserted into, or removed — doing
	// so would silently change every subsequent constant's value.
	//
	// Naming convention: G/L prefix = global/local search,
	// N/D second letter = no-derivative/derivative-based.

	// GN_DIRECT is DIRECT (global, no-derivative)
	GN_DIRECT = iota
	// GN_DIRECT_L is DIRECT-L (global, no-derivative)
	GN_DIRECT_L
	// GN_DIRECT_L_RAND is Randomized DIRECT-L (global, no-derivative)
	GN_DIRECT_L_RAND
	// GN_DIRECT_NOSCAL is Unscaled DIRECT (global, no-derivative)
	GN_DIRECT_NOSCAL
	// GN_DIRECT_L_NOSCAL is Unscaled DIRECT-L (global, no-derivative)
	GN_DIRECT_L_NOSCAL
	// GN_DIRECT_L_RAND_NOSCAL is Unscaled Randomized DIRECT-L (global, no-derivative)
	GN_DIRECT_L_RAND_NOSCAL
	// GN_ORIG_DIRECT is Original DIRECT version (global, no-derivative)
	GN_ORIG_DIRECT
	// GN_ORIG_DIRECT_L is Original DIRECT-L version (global, no-derivative)
	GN_ORIG_DIRECT_L
	// GD_STOGO is StoGO (NOT COMPILED)
	GD_STOGO
	// GD_STOGO_RAND is StoGO randomized (NOT COMPILED)
	GD_STOGO_RAND
	// LD_LBFGS_NOCEDAL is original L-BFGS code by Nocedal et al. (NOT COMPILED)
	LD_LBFGS_NOCEDAL
	// LD_LBFGS is Limited-memory BFGS (L-BFGS) (local, derivative-based)
	LD_LBFGS
	// LN_PRAXIS is Principal-axis, praxis (local, no-derivative)
	LN_PRAXIS
	// LD_VAR1 is Limited-memory variable-metric, rank 1 (local, derivative-based)
	LD_VAR1
	// LD_VAR2 is Limited-memory variable-metric, rank 2 (local, derivative-based)
	LD_VAR2
	// LD_TNEWTON is Truncated Newton (local, derivative-based)
	LD_TNEWTON
	// LD_TNEWTON_RESTART is Truncated Newton with restarting (local, derivative-based)
	LD_TNEWTON_RESTART
	// LD_TNEWTON_PRECOND is Preconditioned truncated Newton (local, derivative-based)
	LD_TNEWTON_PRECOND
	// LD_TNEWTON_PRECOND_RESTART is Preconditioned truncated Newton with restarting (local, derivative-based)
	LD_TNEWTON_PRECOND_RESTART
	// GN_CRS2_LM is Controlled random search (CRS2) with local mutation (global, no-derivative)
	GN_CRS2_LM
	// GN_MLSL is Multi-level single-linkage (MLSL), random (global, no-derivative)
	GN_MLSL
	// GD_MLSL is Multi-level single-linkage (MLSL), random (global, derivative)
	GD_MLSL
	// GN_MLSL_LDS is Multi-level single-linkage (MLSL), quasi-random (global, no-derivative)
	GN_MLSL_LDS
	// GD_MLSL_LDS is Multi-level single-linkage (MLSL), quasi-random (global, derivative)
	GD_MLSL_LDS
	// LD_MMA is Method of Moving Asymptotes (MMA) (local, derivative)
	LD_MMA
	// LN_COBYLA is COBYLA (Constrained Optimization BY Linear Approximations) (local, no-derivative)
	LN_COBYLA
	// LN_NEWUOA is NEWUOA unconstrained optimization via quadratic models (local, no-derivative)
	LN_NEWUOA
	// LN_NEWUOA_BOUND is Bound-constrained optimization via NEWUOA-based quadratic models (local, no-derivative)
	LN_NEWUOA_BOUND
	// LN_NELDERMEAD is Nelder-Mead simplex algorithm (local, no-derivative)
	LN_NELDERMEAD
	// LN_SBPLX is Sbplx variant of Nelder-Mead (re-implementation of Rowan's Subplex) (local, no-derivative)
	LN_SBPLX
	// LN_AUGLAG is Augmented Lagrangian method (local, no-derivative)
	LN_AUGLAG
	// LD_AUGLAG is Augmented Lagrangian method (local, derivative)
	LD_AUGLAG
	// LN_AUGLAG_EQ is Augmented Lagrangian method for equality constraints (local, no-derivative)
	LN_AUGLAG_EQ
	// LD_AUGLAG_EQ is Augmented Lagrangian method for equality constraints (local, derivative)
	LD_AUGLAG_EQ
	// LN_BOBYQA is BOBYQA bound-constrained optimization via quadratic models (local, no-derivative)
	LN_BOBYQA
	// GN_ISRES is ISRES evolutionary constrained optimization (global, no-derivative)
	GN_ISRES
	// AUGLAG is Augmented Lagrangian method (needs sub-algorithm)
	AUGLAG
	// AUGLAG_EQ is Augmented Lagrangian method for equality constraints (needs sub-algorithm)
	AUGLAG_EQ
	// G_MLSL is Multi-level single-linkage (MLSL), random (global, needs sub-algorithm)
	G_MLSL
	// G_MLSL_LDS is Multi-level single-linkage (MLSL), quasi-random (global, needs sub-algorithm)
	G_MLSL_LDS
	// LD_SLSQP is Sequential Quadratic Programming (SQP) (local, derivative)
	LD_SLSQP
	// LD_CCSAQ is CCSA (Conservative Convex Separable Approximations) with simple quadratic approximations (local, derivative)
	LD_CCSAQ
	// GN_ESCH is ESCH evolutionary strategy
	GN_ESCH
	// NUM_ALGORITHMS is number of algorithms
	NUM_ALGORITHMS
)

Variables

This section is empty.

Functions

func AlgorithmName

func AlgorithmName(algorithm int) string

AlgorithmName returns a descriptive string corresponding to a particular algorithm `algorithm`

func Srand

func Srand(seed uint64)

Srand allows you to use a "deterministic" sequence of pseudorandom numbers, i.e. the same sequence from run to run. For stochastic optimization algorithms, pseudorandom numbers generated by the Mersenne Twister algorithm are used. By default, the seed for the random numbers is generated from the system time, so that you will get a different sequence of pseudorandom numbers each time you run your program.

Some of the algorithms also support using low-discrepancy sequences (LDS), sometimes known as quasi-random numbers. NLopt uses the Sobol LDS, which is implemented for up to 1111 dimensions.

func SrandTime

func SrandTime()

SrandTime resets the seed based on the system time. Normally, you don't need to call this as it is called automatically. However, it might be useful if you want to "re-randomize" the pseudorandom numbers after calling nlopt.Srand to set a deterministic seed.

func Version

func Version() string

Version determines the version number of NLopt at runtime

Types

type Func

type Func func(x, gradient []float64) float64

Func is an objective function to minimize or maximize. The return should be the value of the function at the point x, where x points to a slice of length n of the optimization parameters. If the argument gradient is not <nil> or empty then it points to a slice of length n, which should (upon return) be set in-place to the gradient of the function with respect to the optimization parameters at x

type Mfunc

type Mfunc func(result, x, gradient []float64)

Mfunc is a vector-valued objective function for applications where it is more convenient to define a single function that returns the values (and gradients) of all constraints at once. Upon return the output value of the constraints should be stored in result, a slice of length m (the same as the dimension passed to nlopt.Add*MConstraint). In addition, if gradient is non-<nil>, then gradient points to a slice of length m*n which should, upon return, be set to the gradients of the constraint functions with respect to x.

type NLopt

type NLopt struct {
	// contains filtered or unexported fields
}

NLopt wraps a C.nlopt_opt "object" (an opaque pointer), which is used to set various optimization parameters and then execute the algorithm.

func NewNLopt

func NewNLopt(algorithm int, n uint) (*NLopt, error)

NewNLopt returns a newly allocated nlopt_opt object given an algorithm and the dimensionality of the problem `n` (the number of optimization parameters)

func (*NLopt) AddEqualityConstraint

func (n *NLopt) AddEqualityConstraint(h Func, tol float64) error

AddEqualityConstraint adds an arbitrary nonlinear equality constraint h. The functionality is supported by ISRES and AUGLAG algorithms. The parameter tol is a tolerance used for the purpose of stopping criteria only.

func (*NLopt) AddEqualityMConstraint

func (n *NLopt) AddEqualityMConstraint(h Mfunc, tol []float64) error

AddEqualityMConstraint adds vector-valued equality constraint h. Slice tol points to a slice of length m of the tolerances in each constraint dimension (or <nil> for zero tolerances)

func (*NLopt) AddInequalityConstraint

func (n *NLopt) AddInequalityConstraint(fc Func, tol float64) error

AddInequalityConstraint adds an arbitrary nonlinear inequality constraint fc. The functionality is supported by MMA, COBYLA and ORIG_DIRECT algorithms. The parameter tol is a tolerance used for the purpose of stopping criteria only.

func (*NLopt) AddInequalityMConstraint

func (n *NLopt) AddInequalityMConstraint(fc Mfunc, tol []float64) error

AddInequalityMConstraint adds vector-valued inequality constraint fc. Slice tol points to a slice of length m of the tolerances in each constraint dimension (or <nil> for zero tolerances)

func (*NLopt) Copy

func (n *NLopt) Copy() *NLopt

Copy makes an independent copy of an object

func (*NLopt) Destroy

func (n *NLopt) Destroy()

Destroy deallocates nlopt_opt object and frees all reserved resources

func (*NLopt) ForceStop

func (n *NLopt) ForceStop() error

ForceStop allows the caller to force the optimization to halt, for some reason unknown to NLopt. This causes nlopt.Optimize to halt, returning the FORCED_STOP error. It has no effect if not called during nlopt.Optimize.

func (*NLopt) GetAlgorithm

func (n *NLopt) GetAlgorithm() int

GetAlgorithm returns an immutable algorithm id parameter for this instance

func (*NLopt) GetAlgorithmName

func (n *NLopt) GetAlgorithmName() string

GetAlgorithmName returns a descriptive immutable algorithm name for this instance

func (*NLopt) GetDimension

func (n *NLopt) GetDimension() uint

GetDimension returns an immutable dimension parameter for this instance

func (*NLopt) GetForceStop

func (n *NLopt) GetForceStop() int

GetForceStop retrieves the last forced-stop value that was set since the last nlopt.Optimize. The force-stop value is reset to zero at the beginning of nlopt.Optimize.

func (*NLopt) GetFtolAbs

func (n *NLopt) GetFtolAbs() float64

GetFtolAbs retrieves the current value for absolute function value tolerance criterion

func (*NLopt) GetFtolRel

func (n *NLopt) GetFtolRel() float64

GetFtolRel retrieves the current value for relative function value tolerance criterion

func (*NLopt) GetInitialStep

func (n *NLopt) GetInitialStep() ([]float64, []float64, error)

GetInitialStep retrieves the initial step size. The first slice is the same as the initial guess that you plan to pass to nlopt.NLopt.Optimize – if you have not set the initial step and NLopt is using its heuristics, its heuristic step size may depend on the initial x, which is why you must pass it here. Both slices are of length n (the dimension of the problem from nlopt.NewNLopt), where the latter on successful return contains the initial step sizes.

func (*NLopt) GetLowerBounds

func (n *NLopt) GetLowerBounds() ([]float64, error)

GetLowerBounds returns lower bounds. It is possible not to have lower bounds set. The size of return slice is n (the dimension of the problem)

func (*NLopt) GetMaxEval

func (n *NLopt) GetMaxEval() int

GetMaxEval retrieves the current value for maxeval criterion

func (*NLopt) GetMaxTime

func (n *NLopt) GetMaxTime() float64

GetMaxTime retrieves the current value for maxtime criterion

func (*NLopt) GetParam

func (n *NLopt) GetParam(name string, defaultVal float64) float64

GetParam gets an internal algorithm parameter by name

func (*NLopt) GetPopulation

func (n *NLopt) GetPopulation() uint

GetPopulation retrieves initial "population" of random points x

func (*NLopt) GetStopVal

func (n *NLopt) GetStopVal() float64

GetStopVal retrieves the current value for stopval criterion

func (*NLopt) GetUpperBounds

func (n *NLopt) GetUpperBounds() ([]float64, error)

GetUpperBounds returns upper bounds. It is possible not to have upper bounds set. The size of return slice is n (the dimension of the problem)

func (*NLopt) GetVectorStorage

func (n *NLopt) GetVectorStorage() uint

GetVectorStorage retrieves size of vector storage

func (*NLopt) GetXtolAbs

func (n *NLopt) GetXtolAbs() ([]float64, error)

GetXtolAbs retrieves the current value for absolute tolerances on optimization parameters criterion

func (*NLopt) GetXtolRel

func (n *NLopt) GetXtolRel() float64

GetXtolRel retrieves the current value for relative tolerance on optimization parameters criterion

func (*NLopt) HasParam

func (n *NLopt) HasParam(name string) int

HasParam checks whether an internal algorithm parameter has been set

func (*NLopt) LastStatus

func (n *NLopt) LastStatus() string

func (*NLopt) NthParam

func (n *NLopt) NthParam(idx uint) string

NthParam gets an internal algorithm parameter for provided index

func (*NLopt) NumParams

func (n *NLopt) NumParams() uint

NumParams returns the number of internal algorithm parameters

func (*NLopt) Optimize

func (n *NLopt) Optimize(x []float64) ([]float64, float64, error)

Optimize performs optimization once all of the desired optimization parameters have been specified in a given object. Input x is a slice of length n (the dimension of the problem from nlopt.NewNLopt) giving an initial guess for the optimization parameters. On successful return, a slice contains the optimized values of the parameters, and value contains the corresponding value of the objective function.

func (*NLopt) RemoveEqualityConstraints

func (n *NLopt) RemoveEqualityConstraints() error

RemoveEqualityConstraints removes all equality constraints

func (*NLopt) RemoveInequalityConstraints

func (n *NLopt) RemoveInequalityConstraints() error

RemoveInequalityConstraints removes all inequality constraints

func (*NLopt) SetDefaultInitialStep

func (n *NLopt) SetDefaultInitialStep(x []float64) error

func (*NLopt) SetForceStop

func (n *NLopt) SetForceStop(val int) error

SetForceStop sets a forced-stop integer value val, which can be later retrieved. Passing val=0 to nlopt.SetForceStop tells NLopt not to force a halt.

func (*NLopt) SetFtolAbs

func (n *NLopt) SetFtolAbs(tol float64) error

SetFtolAbs sets absolute tolerance on function value: stop when an optimization step (or an estimate of the optimum) changes the function value by less than tol. Criterion is disabled if tol is non-positive.

func (*NLopt) SetFtolRel

func (n *NLopt) SetFtolRel(tol float64) error

SetFtolRel sets relative tolerance on function value: stop when an optimization step (or an estimate of the optimum) changes the objective function value by less than tol multiplied by the absolute value of the function value. Criterion is disabled if tol is non-positive.

func (*NLopt) SetInitialStep

func (n *NLopt) SetInitialStep(dx []float64) error

SetInitialStep sets the initial step size to perturb x by when the optimizer begins the optimization, for derivative-free local-optimization algorithms. This step size should be big enough that the value of the objective changes significantly, but not too big if you want to find the local optimum nearest to x. By default, NLopt chooses this initial step size heuristically from the bounds, tolerances, and other information, but this may not always be the best choice. Parameter dx is a slice of length n (the dimension of the problem from nlopt.NewNLopt) containing the (nonzero) initial step size for each component of the optimization parameters x. If you pass <nil> for dx, then NLopt will use its heuristics to determine the initial step size.

func (*NLopt) SetInitialStep1

func (n *NLopt) SetInitialStep1(dx float64) error

SetInitialStep1 sets initial step size to perturb x by when optimizer begins the optimization for derivative-free local-optimization algorithms to the same value in every direction.

func (*NLopt) SetLocalOptimizer

func (n *NLopt) SetLocalOptimizer(localOpt *NLopt) error

SetLocalOptimizer sets a different optimization algorithm as a subroutine for algorithms like MLSL and AUGLAG. Here localOpt is another nlopt.NLopt object whose parameters are used to determine the local search algorithm, its stopping criteria, and other algorithm parameters. (However, the objective function, bounds, and nonlinear-constraint parameters of localOpt are ignored.) The dimension n of localOpt must match that of opt.

func (*NLopt) SetLowerBounds

func (n *NLopt) SetLowerBounds(lb []float64) error

SetLowerBounds sets lower bounds that an objective function and any nonlinear constraints will never be evaluated outside of these bounds. Bounds are set by passing a slice lb of length n (the dimension of the problem)

func (*NLopt) SetLowerBounds1

func (n *NLopt) SetLowerBounds1(lb float64) error

SetLowerBounds1 sets lower bounds to a single constant for all optimization parameters

func (*NLopt) SetMaxEval

func (n *NLopt) SetMaxEval(maxeval int) error

SetMaxEval sets a criterion to stop when the number of function evaluations exceeds maxeval. Criterion is disabled if maxeval is non-positive.

func (*NLopt) SetMaxObjective

func (n *NLopt) SetMaxObjective(f Func) error

SetMaxObjective sets the objective function f to maximize

func (*NLopt) SetMaxTime

func (n *NLopt) SetMaxTime(maxtime float64) error

SetMaxTime sets a criterion to stop when the optimization time (in seconds) exceeds maxtime. Criterion is disabled if maxtime is non-positive.

func (*NLopt) SetMinObjective

func (n *NLopt) SetMinObjective(f Func) error

SetMinObjective sets the objective function f to minimize

func (*NLopt) SetParam

func (n *NLopt) SetParam(name string, val float64) error

SetParam sets an internal algorithm parameter

func (*NLopt) SetPopulation

func (n *NLopt) SetPopulation(pop uint) error

SetPopulation sets an initial "population" of random points x for several of the stochastic search algorithms (e.g., CRS, MLSL, and ISRES). By default, this initial population size is chosen heuristically in some algorithm-specific way. A pop of zero implies that the heuristic default will be used.

func (*NLopt) SetStopVal

func (n *NLopt) SetStopVal(stopval float64) error

SetStopVal sets a criterion to stop when an objective value of at least stopval is found: stop minimizing when an objective value ≤ stopval is found, or stop maximizing when a value ≥ stopval is found.

func (*NLopt) SetUpperBounds

func (n *NLopt) SetUpperBounds(ub []float64) error

SetUpperBounds sets upper bounds such that an objective function and any nonlinear constraints will never be evaluated outside of these bounds. Bounds are set by passing a slice ub of length n (the dimension of the problem)

func (*NLopt) SetUpperBounds1

func (n *NLopt) SetUpperBounds1(ub float64) error

SetUpperBounds1 sets upper bounds to a single constant for all optimization parameters

func (*NLopt) SetVectorStorage

func (n *NLopt) SetVectorStorage(M uint) error

SetVectorStorage for some of the NLopt algorithms that are limited-memory "quasi-Newton" algorithms, which "remember" the gradients from a finite number M of the previous optimization steps in order to construct an approximate 2nd derivative matrix. The bigger M is, the more storage the algorithms require, but on the other hand they may converge faster for larger M. By default, NLopt chooses a heuristic value of M.

Passing M=0 (the default) tells NLopt to use a heuristic value. By default, NLopt currently sets M to 10 or at most 10 MiB worth of vectors, whichever is larger.

func (*NLopt) SetXtolAbs

func (n *NLopt) SetXtolAbs(tol []float64) error

SetXtolAbs sets absolute tolerances on optimization parameters. tol is a slice of length n (the dimension from NewNLopt) giving the tolerances: stop when an optimization step (or an estimate of the optimum) changes every parameter x[i] by less than tol[i]

func (*NLopt) SetXtolAbs1

func (n *NLopt) SetXtolAbs1(tol float64) error

SetXtolAbs1 sets the absolute tolerances in all n optimization parameters to the same value tol. Criterion is disabled if tol is non-positive.

func (*NLopt) SetXtolRel

func (n *NLopt) SetXtolRel(tol float64) error

SetXtolRel sets relative tolerance on optimization parameters: stop when an optimization step (or an estimate of the optimum) changes every parameter by less than tol multiplied by the absolute value of the parameter. Criterion is disabled if tol is non-positive.

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL