lr

package
Version: v0.0.0-...-d942b2f Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Feb 21, 2014 License: MIT Imports: 8 Imported by: 0

Documentation

Index

Constants

View Source
const MAX_BACKTRACKING_ITER = 50

Variables

This section is empty.

Functions

func NewRelativeMeanImprCriterion

func NewRelativeMeanImprCriterion(tolerance float64) *relativeMeanImprCriterion

Types

type DiffFunction

type DiffFunction interface {
	Value(pos *core.Vector) float64
	Gradient(pos *core.Vector) *core.Vector
}

Description: function interface for minimizers such as LBFGS and OWLQN

type EPLogisticRegression

type EPLogisticRegression struct {
	Model map[int64]*util.Gaussian
	// contains filtered or unexported fields
}

func (*EPLogisticRegression) Clear

func (algo *EPLogisticRegression) Clear()

func (*EPLogisticRegression) Init

func (algo *EPLogisticRegression) Init(params map[string]string)

func (*EPLogisticRegression) LoadModel

func (algo *EPLogisticRegression) LoadModel(path string)

func (*EPLogisticRegression) Predict

func (algo *EPLogisticRegression) Predict(sample *core.Sample) float64

func (*EPLogisticRegression) SaveModel

func (algo *EPLogisticRegression) SaveModel(path string)

func (*EPLogisticRegression) Train

func (algo *EPLogisticRegression) Train(dataset *core.DataSet)

type EPLogisticRegressionParams

type EPLogisticRegressionParams struct {
	// contains filtered or unexported fields
}

type FTRLFeatureWeight

type FTRLFeatureWeight struct {
	// contains filtered or unexported fields
}

func (*FTRLFeatureWeight) Wi

type FTRLLogisticRegression

type FTRLLogisticRegression struct {
	Model  map[int64]FTRLFeatureWeight
	Params FTRLLogisticRegressionParams
}

func (*FTRLLogisticRegression) Clear

func (algo *FTRLLogisticRegression) Clear()

func (*FTRLLogisticRegression) Init

func (algo *FTRLLogisticRegression) Init(params map[string]string)

func (*FTRLLogisticRegression) LoadModel

func (algo *FTRLLogisticRegression) LoadModel(path string)

func (*FTRLLogisticRegression) Predict

func (algo *FTRLLogisticRegression) Predict(sample *core.Sample) float64

func (*FTRLLogisticRegression) SaveModel

func (algo *FTRLLogisticRegression) SaveModel(path string)

func (*FTRLLogisticRegression) Train

func (algo *FTRLLogisticRegression) Train(dataset *core.DataSet)

type FTRLLogisticRegressionParams

type FTRLLogisticRegressionParams struct {
	Alpha, Beta, Lambda1, Lambda2 float64
	Steps                         int
}

type LBFGSMinimizer

type LBFGSMinimizer struct {
	// contains filtered or unexported fields
}

*

* It's based on the paper "Scalable Training of L1-Regularized Log-Linear Models"
* by Galen Andrew and Jianfeng Gao
* user: weixuan
* To change this template use File | Settings | File Templates.

func NewLBFGSMinimizer

func NewLBFGSMinimizer() *LBFGSMinimizer

func (*LBFGSMinimizer) Evaluate

func (m *LBFGSMinimizer) Evaluate(pos *core.Vector) float64

func (*LBFGSMinimizer) Minimize

func (m *LBFGSMinimizer) Minimize(costfun DiffFunction, init *core.Vector) *core.Vector

func (*LBFGSMinimizer) NextPoint

func (m *LBFGSMinimizer) NextPoint(curPos *core.Vector, dir *core.Vector, alpha float64) *core.Vector

type LROWLQN

type LROWLQN struct {
	Model  *core.Vector
	Params LROWLQNParams
	// contains filtered or unexported fields
}

func (*LROWLQN) Equals

func (lr *LROWLQN) Equals(x *core.Vector, y *core.Vector) bool

func (*LROWLQN) Gradient

func (lr *LROWLQN) Gradient(pos *core.Vector) *core.Vector

func (*LROWLQN) Init

func (lr *LROWLQN) Init(params map[string]string)

func (*LROWLQN) LoadModel

func (lr *LROWLQN) LoadModel(path string)

func (*LROWLQN) Predict

func (lr *LROWLQN) Predict(sample *core.Sample) float64

func (*LROWLQN) SaveModel

func (lr *LROWLQN) SaveModel(path string)

func (*LROWLQN) Train

func (lr *LROWLQN) Train(dataset *core.DataSet)

func (*LROWLQN) Value

func (lr *LROWLQN) Value(pos *core.Vector) float64

type LROWLQNParams

type LROWLQNParams struct {
	Regularization float64
}

type LinearRegression

type LinearRegression struct {
	Model  map[int64]float64
	Params LogisticRegressionParams
}

func (*LinearRegression) Init

func (algo *LinearRegression) Init(params map[string]string)

func (*LinearRegression) LoadModel

func (algo *LinearRegression) LoadModel(path string)

func (*LinearRegression) Predict

func (algo *LinearRegression) Predict(sample *core.Sample) float64

func (*LinearRegression) SaveModel

func (algo *LinearRegression) SaveModel(path string)

func (*LinearRegression) Train

func (algo *LinearRegression) Train(dataset *core.DataSet)

type LogisticRegression

type LogisticRegression struct {
	Model  map[int64]float64
	Params LogisticRegressionParams
}

func (*LogisticRegression) Init

func (algo *LogisticRegression) Init(params map[string]string)

func (*LogisticRegression) LoadModel

func (algo *LogisticRegression) LoadModel(path string)

func (*LogisticRegression) Predict

func (algo *LogisticRegression) Predict(sample *core.Sample) float64

func (*LogisticRegression) SaveModel

func (algo *LogisticRegression) SaveModel(path string)

func (*LogisticRegression) Train

func (algo *LogisticRegression) Train(dataset *core.DataSet)

type LogisticRegressionParams

type LogisticRegressionParams struct {
	LearningRate   float64
	Regularization float64
	Steps          int
}

type Minimizer

type Minimizer interface {
	NextPoint(curPos *core.Vector, dir *core.Vector, alpha float64) *core.Vector
	Evaluate(curPos *core.Vector) float64
}

type OWLQNMinimizer

type OWLQNMinimizer struct {
	// contains filtered or unexported fields
}

*

* It's based on the paper "Scalable Training of L1-Regularized Log-Linear Models"
* by Galen Andrew and Jianfeng Gao
* user: weixuan
* To change this template use File | Settings | File Templates.

func NewOWLQNMinimizer

func NewOWLQNMinimizer(l1reg float64) *OWLQNMinimizer

func (*OWLQNMinimizer) Evaluate

func (m *OWLQNMinimizer) Evaluate(pos *core.Vector) float64

func (*OWLQNMinimizer) Minimize

func (m *OWLQNMinimizer) Minimize(costfun DiffFunction, init *core.Vector) *core.Vector

func (*OWLQNMinimizer) NextPoint

func (m *OWLQNMinimizer) NextPoint(curPos *core.Vector, dir *core.Vector, alpha float64) *core.Vector

type QuasiNewtonHelper

type QuasiNewtonHelper struct {
	// contains filtered or unexported fields
}

*

* It's based on the paper "Scalable Training of L1-Regularized Log-Linear Models"
* by Galen Andrew and Jianfeng Gao
* user: weixuan
* To change this template use File | Settings | File Templates.

func NewQuasiNewtonHelper

func NewQuasiNewtonHelper(numHist int, minimizer Minimizer, curPos *core.Vector, curGrad *core.Vector) *QuasiNewtonHelper

Description: the pos and gradient arguments should NOT be modified outside

func (*QuasiNewtonHelper) ApplyQuasiInverseHession

func (h *QuasiNewtonHelper) ApplyQuasiInverseHession(dir *core.Vector)

Description: Update the dir from -grad to optimal direction

Dir will be modified directly

func (*QuasiNewtonHelper) BackTrackingLineSearch

func (h *QuasiNewtonHelper) BackTrackingLineSearch(cost float64, pos *core.Vector, grad *core.Vector, dir *core.Vector, isInit bool) (nextCost float64, nextPos *core.Vector)

func (*QuasiNewtonHelper) UpdateState

func (h *QuasiNewtonHelper) UpdateState(nextPos *core.Vector, nextGrad *core.Vector) (isOptimal bool)

Description: the pos and gradient arguments should NOT be modified outside

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
t or T : Toggle theme light dark auto
y or Y : Canonical URL