Package metrics

Published: Feb 10, 2020 | License: MIT | Module: github.com/RobinRCM/sklearn

Overview

Package metrics includes score functions, performance metrics and pairwise metrics and distance computations.

func AUC

func AUC(fpr, tpr []float64) float64

AUC computes the Area Under the Curve (AUC) using the trapezoidal rule.
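
For intuition, the trapezoidal rule named above sums the areas of the trapezoids between consecutive (fpr, tpr) points. A minimal, hypothetical re-implementation sketch (not the package's actual code; the points are taken from the ROCCurve example later on this page):

// trapezoidAUC integrates tpr over fpr with the trapezoidal rule:
// each segment contributes (fpr[i]-fpr[i-1]) * (tpr[i]+tpr[i-1]) / 2.
func trapezoidAUC(fpr, tpr []float64) float64 {
	var area float64
	for i := 1; i < len(fpr); i++ {
		area += (fpr[i] - fpr[i-1]) * (tpr[i] + tpr[i-1]) / 2
	}
	return area
}

// trapezoidAUC([]float64{0, 0.5, 0.5, 1}, []float64{0.5, 0.5, 1, 1}) == 0.75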

Example

Code:

// example adapted from https://github.com/scikit-learn/scikit-learn/blob/a24c8b46/sklearn/metrics/ranking.py#L453
Y := mat.NewDense(4, 1, []float64{1, 1, 2, 2})
scores := mat.NewDense(4, 1, []float64{.1, .4, .35, .8})
fpr, tpr, _ := ROCCurve(Y, scores, 2., nil)
fmt.Println("auc:", AUC(fpr, tpr))
Output:

auc: 0.75

func AccuracyScore

func AccuracyScore(Ytrue, Ypred mat.Matrix, normalize bool, sampleWeight *mat.Dense) float64

AccuracyScore returns the (weighted) fraction of correctly predicted samples, i.e. true predictions / nSamples.

Example

Code:

// adapted from example in https://github.com/scikit-learn/scikit-learn/blob/0.19.1/sklearn/metrics/classification.py
var nilDense *mat.Dense
normalize, sampleWeight := true, nilDense
Ypred, Ytrue := mat.NewDense(4, 1, []float64{0, 2, 1, 3}), mat.NewDense(4, 1, []float64{0, 1, 2, 3})
fmt.Println(AccuracyScore(Ytrue, Ypred, normalize, sampleWeight))
fmt.Println(AccuracyScore(mat.NewDense(2, 2, []float64{0, 1, 1, 1}), mat.NewDense(2, 2, []float64{1, 1, 1, 1}), normalize, sampleWeight))
Output:

0.5
0.5

func AveragePrecisionScore

func AveragePrecisionScore(Ytrue, Yscore *mat.Dense, average string, sampleWeight []float64) float64

AveragePrecisionScore computes the average precision (AP) from prediction scores.
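
As background, sklearn defines AP as the sum of precisions weighted by the increase in recall between consecutive thresholds. A minimal, illustrative sketch of that summation (not this package's actual code), using the decreasing recall values as returned by PrecisionRecallCurve:

// averagePrecision sums precision weighted by the drop in recall:
// AP = sum over i of (recall[i] - recall[i+1]) * precision[i].
func averagePrecision(precision, recall []float64) float64 {
	var ap float64
	for i := 0; i+1 < len(recall); i++ {
		ap += (recall[i] - recall[i+1]) * precision[i]
	}
	return ap
}

Applied to the precision [0.667 0.5 1 1] and recall [1 0.5 0.5 0] values of the PrecisionRecallCurve example below, this yields 0.5*0.667 + 0.5*1 ≈ 0.83, matching the first figure printed here.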

Example

Code:

// adapted from https://github.com/scikit-learn/scikit-learn/blob/a24c8b46/sklearn/metrics/ranking.py#L180
Ytrue := mat.NewDense(4, 1, []float64{0, 0, 1, 1})
Yscores := mat.NewDense(4, 1, []float64{.1, .4, .35, .8})
fmt.Printf("AveragePrecisionScore : %.2f\n", AveragePrecisionScore(Ytrue, Yscores, "macro", nil))

// import numpy as np
// from sklearn.metrics import average_precision_score
// y_true = np.array([[0,0], [0,0], [1,1], [1,1]])
// y_scores = np.array([[0.1,.5],[ 0.4,.4], [0.35,.35], [0.8,.4]])
// average_precision_score(y_true, y_scores,"macro"),average_precision_score(y_true, y_scores,"micro")
/*
	np.average( [0.83333333 0.41666667] , None )
	np.average( [0.63571429] , None )
	(0.625, 0.6357142857142857)
*/

//FIXME
Ytrue = mat.NewDense(4, 2, []float64{0, 0, 0, 0, 1, 1, 1, 1})
Yscores = mat.NewDense(4, 2, []float64{.1, .5, .4, .4, .35, .35, .8, .4})
fmt.Printf("AveragePrecisionScore macro: %.3f\n", AveragePrecisionScore(Ytrue, Yscores, "macro", nil))
fmt.Printf("AveragePrecisionScore micro: %.3f\n", AveragePrecisionScore(Ytrue, Yscores, "micro", []float64{1, 1, 1, 1}))
Output:

AveragePrecisionScore : 0.83
AveragePrecisionScore macro: 0.625
AveragePrecisionScore micro: 0.636

func ConfusionMatrix

func ConfusionMatrix(YTrue, YPred *mat.Dense, sampleWeight []float64) *mat.Dense

ConfusionMatrix computes a confusion matrix to evaluate the accuracy of a classification. It operates only on the 1st Y column and uses preprocessing.LabelEncoder to map class values to class indices.
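
After that label-encoding step, a confusion matrix is simply a count table indexed by (true class, predicted class). A minimal, hypothetical sketch (not the package's actual code):

// confusionCounts tallies counts[true][pred] for class indices in 0..nClasses-1.
func confusionCounts(yTrue, yPred []int, nClasses int) [][]int {
	counts := make([][]int, nClasses)
	for i := range counts {
		counts[i] = make([]int, nClasses)
	}
	for i := range yTrue {
		counts[yTrue[i]][yPred[i]]++
	}
	return counts
}

Row i holds how samples whose true class is i were distributed over the predicted classes, which is how the matrix in the example below reads.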

Example

Code:

// adapted from example in http://scikit-learn.org/stable/modules/generated/sklearn.metrics.confusion_matrix.html#sklearn.metrics.confusion_matrix
YTrue := mat.NewDense(6, 1, []float64{2, 0, 2, 2, 0, 1})
YPred := mat.NewDense(6, 1, []float64{0, 0, 2, 2, 0, 2})
fmt.Println(mat.Formatted(ConfusionMatrix(YTrue, YPred, nil)))
Output:

⎡2  0  0⎤
⎢0  0  1⎥
⎣1  0  2⎦

func F1Score

func F1Score(Ytrue, Ypred *mat.Dense, average string, sampleWeight []float64) float64

F1Score computes the F1 score (see https://en.wikipedia.org/wiki/F1_score). average must be one of macro|micro|weighted. // TODO: binary, samples

Example

Code:

// adapted from example in https://github.com/scikit-learn/scikit-learn/blob/0.19.1/sklearn/metrics/classification.py
Ytrue, Ypred := mat.NewDense(6, 1, []float64{0, 1, 2, 0, 1, 2}), mat.NewDense(6, 1, []float64{0, 2, 1, 0, 0, 1})
var sampleWeight []float64
fmt.Printf("%.2f\n", F1Score(Ytrue, Ypred, "macro", sampleWeight))
fmt.Printf("%.2f\n", F1Score(Ytrue, Ypred, "micro", sampleWeight))
fmt.Printf("%.2f\n", F1Score(Ytrue, Ypred, "weighted", sampleWeight))
Output:

0.27
0.33
0.27

func FBetaScore

func FBetaScore(Ytrue, Ypred *mat.Dense, beta float64, average string, sampleWeight []float64) float64

FBetaScore is the weighted harmonic mean of precision and recall, reaching its optimal value at 1 and its worst value at 0.

The beta parameter determines the weight of precision in the combined score: beta < 1 lends more weight to precision, while beta > 1 favors recall (beta -> 0 considers only precision, beta -> inf only recall).

average must be one of macro|micro|weighted. // TODO: binary, samples
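
In terms of precision P and recall R, the weighted harmonic mean described above is F_beta = (1+beta^2)*P*R / (beta^2*P + R); with beta = 1 it reduces to the familiar F1 = 2PR/(P+R). A minimal, illustrative sketch (not this package's actual code):

// fBeta computes (1+beta^2) * p * r / (beta^2*p + r).
// beta -> 0 returns p only; beta -> inf returns r only; beta = 1 gives F1.
func fBeta(p, r, beta float64) float64 {
	if p == 0 && r == 0 {
		return 0
	}
	b2 := beta * beta
	return (1 + b2) * p * r / (b2*p + r)
}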

Example

Code:

// adapted from example in https://github.com/scikit-learn/scikit-learn/blob/0.19.1/sklearn/metrics/classification.py
Ytrue, Ypred := mat.NewDense(6, 1, []float64{0, 1, 2, 0, 1, 2}), mat.NewDense(6, 1, []float64{0, 2, 1, 0, 0, 1})
var sampleWeight []float64
fmt.Printf("%.2f\n", FBetaScore(Ytrue, Ypred, .5, "macro", sampleWeight))
fmt.Printf("%.2f\n", FBetaScore(Ytrue, Ypred, .5, "micro", sampleWeight))
fmt.Printf("%.2f\n", FBetaScore(Ytrue, Ypred, .5, "weighted", sampleWeight))
Output:

0.24
0.33
0.24

func MeanAbsoluteError

func MeanAbsoluteError(yTrue, yPred mat.Matrix, sampleWeight *mat.Dense, multioutput string) *mat.Dense

MeanAbsoluteError regression loss.

Read more in the sklearn User Guide (mean_absolute_error).

Parameters

y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
    Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
    Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
    Sample weights.
multioutput : string in ['raw_values', 'uniform_average'] or array-like of shape (n_outputs)
    Defines aggregating of multiple output values.
    Array-like value defines weights used to average errors.
    'raw_values' returns a full set of errors in case of multioutput input.
    'uniform_average' averages errors of all outputs with uniform weight.

Returns

loss : float or ndarray of floats
    If multioutput is 'raw_values', then the mean absolute error is returned for each output separately.
    If multioutput is 'uniform_average' or an ndarray of weights, then the weighted average of all output errors is returned.
    MAE output is non-negative floating point. The best value is 0.0.

Examples (sklearn doctest)

    >>> from sklearn.metrics import mean_absolute_error
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> mean_absolute_error(y_true, y_pred)
    0.5
    >>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
    >>> y_pred = [[0, 2], [-1, 2], [8, -5]]
    >>> mean_absolute_error(y_true, y_pred)
    0.75
    >>> mean_absolute_error(y_true, y_pred, multioutput='raw_values')
    array([ 0.5,  1. ])
    >>> mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
    0.849...
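
This function has no Go example on this page; the following usage sketch mirrors the doctest above using the signature shown here. It assumes, by analogy with the R2Score example below, that an empty multioutput string selects the uniform average, and that mat is gonum.org/v1/gonum/mat:

yTrue := mat.NewDense(4, 1, []float64{3, -0.5, 2, 7})
yPred := mat.NewDense(4, 1, []float64{2.5, 0.0, 2, 8})
// Expected ≈ 0.5 per the doctest above: (0.5+0.5+0+1)/4.
fmt.Println(MeanAbsoluteError(yTrue, yPred, nil, "").At(0, 0))

yTrue = mat.NewDense(3, 2, []float64{0.5, 1, -1, 1, 7, -6})
yPred = mat.NewDense(3, 2, []float64{0, 2, -1, 2, 8, -5})
// Expected ≈ 0.75 per the doctest above: mean of per-output errors 0.5 and 1.
fmt.Println(MeanAbsoluteError(yTrue, yPred, nil, "").At(0, 0))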

func MeanSquaredError

func MeanSquaredError(yTrue, yPred mat.Matrix, sampleWeight *mat.Dense, multioutput string) *mat.Dense

MeanSquaredError regression loss.

Read more in the sklearn User Guide (mean_squared_error).

Parameters

y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
    Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
    Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
    Sample weights.
multioutput : string in ['raw_values', 'uniform_average'] or array-like of shape (n_outputs)
    Defines aggregating of multiple output values.
    Array-like value defines weights used to average errors.
    'raw_values' returns a full set of errors in case of multioutput input.
    'uniform_average' averages errors of all outputs with uniform weight.

Returns

loss : float or ndarray of floats
    A non-negative floating point value (the best value is 0.0), or an array of floating point values, one for each individual target.
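
No example is given for this function either; a minimal usage sketch analogous to the MeanAbsoluteError one above (same assumptions about the empty multioutput string and gonum's mat package):

yTrue := mat.NewDense(4, 1, []float64{3, -0.5, 2, 7})
yPred := mat.NewDense(4, 1, []float64{2.5, 0.0, 2, 8})
// Expected ≈ 0.375: mean of squared errors (0.25+0.25+0+1)/4.
fmt.Println(MeanSquaredError(yTrue, yPred, nil, "").At(0, 0))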

func PrecisionRecallCurve

func PrecisionRecallCurve(Ytrue, ProbasPred *mat.Dense, posLabel float64, sampleWeight []float64) (precision, recall, thresholds []float64)

PrecisionRecallCurve computes precision-recall pairs for different probability thresholds.

Note: this implementation is restricted to the binary classification task.
The precision is the ratio tp / (tp + fp) where tp is the number of
true positives and fp the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio tp / (tp + fn) where tp is the number of
true positives and fn the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The last precision and recall values are 1. and 0. respectively and do not
have a corresponding threshold. This ensures that the graph starts on the
y axis.
Parameters
y_true : array, shape = [n_samples]
    True targets of binary classification in range {-1, 1} or {0, 1}.
probas_pred : array, shape = [n_samples]
    Estimated probabilities or decision function.
pos_label : int or str, default=None
    The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
    Sample weights.
Returns
precision : array, shape = [n_thresholds + 1]
    Precision values such that element i is the precision of
    predictions with score >= thresholds[i] and the last element is 1.
recall : array, shape = [n_thresholds + 1]
    Decreasing recall values such that element i is the recall of
    predictions with score >= thresholds[i] and the last element is 0.
thresholds : array, shape = [n_thresholds <= len(np.unique(probas_pred))]
    Increasing thresholds on the decision function used to compute
    precision and recall.
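
The two ratios in the note above come straight from the confusion counts at a given threshold; a minimal, illustrative helper (not this package's code):

// precisionRecall computes tp/(tp+fp) and tp/(tp+fn) from raw counts.
func precisionRecall(tp, fp, fn float64) (precision, recall float64) {
	return tp / (tp + fp), tp / (tp + fn)
}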
Example

Code:

// example adapted from https://github.com/scikit-learn/scikit-learn/blob/a24c8b46/sklearn/metrics/ranking.py#L423
Ytrue := mat.NewDense(4, 1, []float64{0, 0, 1, 1})
Yscores := mat.NewDense(4, 1, []float64{.1, .4, .35, .8})
precision, recall, thresholds := PrecisionRecallCurve(Ytrue, Yscores, 1, nil)
fmt.Printf("precision: %.3g\n", precision)
fmt.Printf("recall: %.3g\n", recall)
fmt.Printf("thresholds: %.3g\n", thresholds)
Output:

precision: [0.667 0.5 1 1]
recall: [1 0.5 0.5 0]
thresholds: [0.35 0.4 0.8]

func PrecisionRecallFScoreSupport

func PrecisionRecallFScoreSupport(YTrue, YPred *mat.Dense, beta float64, labels []float64, posLabel int, average string, warnFor []string, sampleWeight []float64) (precision, recall, fscore, support float64)

PrecisionRecallFScoreSupport computes precision, recall, F-measure and support for each class. It operates only on the 1st Y column. average must be one of macro|micro|weighted. // TODO: binary, samples. posLabel is -1 or an index into the ordered unique class values; if posLabel >= 0, results are returned for the respective class only.

Example

Code:

// adapted from example in https://github.com/scikit-learn/scikit-learn/blob/0.19.1/sklearn/metrics/classification.py
var precision, recall, fscore, support float64
// cat, dog, pig := 0., 1., 2.
Ytrue, Ypred := mat.NewDense(6, 1, []float64{0, 1, 2, 0, 1, 2}), mat.NewDense(6, 1, []float64{0, 2, 1, 0, 0, 1})
precision, recall, fscore, support = PrecisionRecallFScoreSupport(Ytrue, Ypred, 1, nil, -1, "macro", nil, nil)
fmt.Printf("macro %.2f\n", []float64{precision, recall, fscore, support})
precision, recall, fscore, support = PrecisionRecallFScoreSupport(Ytrue, Ypred, 1, nil, -1, "micro", nil, nil)
fmt.Printf("micro %.2f\n", []float64{precision, recall, fscore, support})
precision, recall, fscore, support = PrecisionRecallFScoreSupport(Ytrue, Ypred, 1, nil, -1, "weighted", nil, nil)
fmt.Printf("weighted %.2f\n", []float64{precision, recall, fscore, support})
Output:

macro [0.22 0.33 0.27 0.00]
micro [0.33 0.33 0.33 0.00]
weighted [0.22 0.33 0.27 0.00]

func PrecisionScore

func PrecisionScore(Ytrue, Ypred *mat.Dense, average string, sampleWeight []float64) float64

PrecisionScore computes the precision (see https://en.wikipedia.org/wiki/F1_score). average must be one of macro|micro|weighted. // TODO: binary, samples

Example

Code:

// adapted from example in https://github.com/scikit-learn/scikit-learn/blob/0.19.1/sklearn/metrics/classification.py
Ytrue, Ypred := mat.NewDense(6, 1, []float64{0, 1, 2, 0, 1, 2}), mat.NewDense(6, 1, []float64{0, 2, 1, 0, 0, 1})
var sampleWeight []float64
fmt.Printf("%.2f\n", PrecisionScore(Ytrue, Ypred, "macro", sampleWeight))
fmt.Printf("%.2f\n", PrecisionScore(Ytrue, Ypred, "micro", sampleWeight))
fmt.Printf("%.2f\n", PrecisionScore(Ytrue, Ypred, "weighted", sampleWeight))
Output:

0.22
0.33
0.22

func R2Score

func R2Score(yTrue, yPred mat.Matrix, sampleWeight *mat.Dense, multioutput string) *mat.Dense

R2Score """R^2 (coefficient of determination) regression score function. Best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). A constant model that always predicts the expected value of y, disregarding the input features, would get a R^2 score of 0.0. Read more in the :ref:`User Guide <r2Score>`. Parameters ---------- yTrue : array-like of shape = (nSamples) or (nSamples, nOutputs)

Ground truth (correct) target values.

yPred : array-like of shape = (nSamples) or (nSamples, nOutputs)

Estimated target values.

sampleWeight : array-like of shape = (nSamples), optional

Sample weights.

multioutput : string in ['rawValues', 'uniformAverage', \ 'varianceWeighted'] or None or array-like of shape (nOutputs)

Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
Default is "uniformAverage".
'rawValues' :
    Returns a full set of scores in case of multioutput input.
'uniformAverage' :
    Scores of all outputs are averaged with uniform weight.
'varianceWeighted' :
    Scores of all outputs are averaged, weighted by the variances
    of each individual output.
.. versionchanged:: 0.19
    Default value of multioutput is 'uniformAverage'.

Returns ------- z : float or ndarray of floats

The R^2 score or ndarray of scores if 'multioutput' is
'rawValues'.

Notes ----- This is not a symmetric function. Unlike most other scores, R^2 score may be negative (it need not actually be the square of a quantity R). References ---------- .. [1] `Wikipedia entry on the Coefficient of determination

<https://en.wikipedia.org/wiki/CoefficientOfDetermination>`_

Examples -------- >>> from sklearn.metrics import r2Score >>> yTrue = [3, -0.5, 2, 7] >>> yPred = [2.5, 0.0, 2, 8] >>> r2Score(yTrue, yPred) # doctest: +ELLIPSIS 0.948... >>> yTrue = [[0.5, 1], [-1, 1], [7, -6]] >>> yPred = [[0, 2], [-1, 2], [8, -5]] >>> r2Score(yTrue, yPred, multioutput='varianceWeighted') ... # doctest: +ELLIPSIS 0.938... >>> yTrue = [1,2,3] >>> yPred = [1,2,3] >>> r2Score(yTrue, yPred) 1.0 >>> yTrue = [1,2,3] >>> yPred = [2,2,2] >>> r2Score(yTrue, yPred) 0.0 >>> yTrue = [1,2,3] >>> yPred = [3,2,1] >>> r2Score(yTrue, yPred) -3.0 """
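
For reference, the score follows the standard definition R^2 = 1 - SSres/SStot, where SSres = sum of (y_i - ŷ_i)^2 and SStot = sum of (y_i - ȳ)^2. A minimal unweighted, single-output sketch (illustrative only, not this package's code):

// r2 computes 1 - SSres/SStot for a single output, without sample weights.
func r2(yTrue, yPred []float64) float64 {
	var mean float64
	for _, y := range yTrue {
		mean += y
	}
	mean /= float64(len(yTrue))
	var ssRes, ssTot float64
	for i := range yTrue {
		ssRes += (yTrue[i] - yPred[i]) * (yTrue[i] - yPred[i])
		ssTot += (yTrue[i] - mean) * (yTrue[i] - mean)
	}
	return 1 - ssRes/ssTot
}

Predicting the mean for every sample makes SSres equal SStot, hence the 0.0 for a constant model noted above; yTrue = [1,2,3], yPred = [3,2,1] gives 1 - 8/2 = -3, matching the doctest.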

Example

Code:

// adapted from example in https://github.com/scikit-learn/scikit-learn/blob/0.19.1/sklearn/metrics/regression.py
yTrue := mat.NewDense(4, 1, []float64{3, -0.5, 2, 7})
yPred := mat.NewDense(4, 1, []float64{2.5, 0.0, 2, 8})
fmt.Printf("%.3f\n", R2Score(yTrue, yPred, nil, "").At(0, 0))

yTrue = mat.NewDense(3, 2, []float64{0.5, 1, -1, 1, 7, -6})
yPred = mat.NewDense(3, 2, []float64{0, 2, -1, 2, 8, -5})
fmt.Printf("%.3f\n", R2Score(yTrue, yPred, nil, "variance_weighted").At(0, 0))

yTrue = mat.NewDense(3, 1, []float64{1, 2, 3})
yPred = mat.NewDense(3, 1, []float64{1, 2, 3})
fmt.Printf("%.3f\n", R2Score(yTrue, yPred, nil, "").At(0, 0))

yTrue = mat.NewDense(3, 1, []float64{1, 2, 3})
yPred = mat.NewDense(3, 1, []float64{2, 2, 2})
fmt.Printf("%g\n", R2Score(yTrue, yPred, nil, "").At(0, 0))

yTrue = mat.NewDense(3, 1, []float64{1, 2, 3})
yPred = mat.NewDense(3, 1, []float64{3, 2, 1})
sampleWeight := mat.NewDense(3, 1, []float64{1, 1, 1})
fmt.Printf("%g\n", R2Score(yTrue, yPred, sampleWeight, "").At(0, 0))
Output:

0.949
0.938
1.000
0
-3

func ROCAUCScore

func ROCAUCScore(Ytrue, Yscore *mat.Dense, average string, sampleWeight []float64) float64

ROCAUCScore computes the Area Under the Receiver Operating Characteristic Curve (ROC AUC) from prediction scores.

Parameters

y_true : array, shape = [n_samples] or [n_samples, n_classes]
    True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
    Target scores, can either be probability estimates of the positive class, confidence values, or non-thresholded measure of decisions (as returned by "decision_function" on some classifiers).
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
    If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data:
    'micro': calculate metrics globally by considering each element of the label indicator matrix as a label.
    'macro': calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
    'weighted': calculate metrics for each label, and find their average, weighted by support (the number of true instances for each label).
    'samples': calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
    Sample weights.

Returns

auc : float
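
To make the averaging options concrete: 'macro' is the unweighted mean of the per-class scores, while 'weighted' weights each class score by its support. A minimal, hypothetical sketch over already-computed per-class AUCs (illustrative only):

// macroAvg returns the unweighted mean of per-class scores ('macro').
func macroAvg(scores []float64) float64 {
	var sum float64
	for _, v := range scores {
		sum += v
	}
	return sum / float64(len(scores))
}

// weightedAvg weights each class score by its support ('weighted').
func weightedAvg(scores, support []float64) float64 {
	var sum, total float64
	for i := range scores {
		sum += scores[i] * support[i]
		total += support[i]
	}
	return sum / total
}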

Example

Code:

// example adapted from https://github.com/scikit-learn/scikit-learn/blob/a24c8b46/sklearn/metrics/ranking.py#L256
Ytrue := mat.NewDense(4, 1, []float64{0, 0, 1, 1})
Yscores := mat.NewDense(4, 1, []float64{.1, .4, .35, .8})

fmt.Println("auc:", ROCAUCScore(Ytrue, Yscores, "", nil))
Output:

auc: 0.75

func ROCCurve

func ROCCurve(Ytrue, Yscore *mat.Dense, posLabel float64, sampleWeight []float64) (fpr, tpr, thresholds []float64)

ROCCurve computes the Receiver Operating Characteristic (ROC).

Parameters

y_true : array, shape = [n_samples]
    True binary labels in range {0, 1} or {-1, 1}. If labels are not binary, pos_label should be explicitly given.
y_score : array, shape = [n_samples]
    Target scores, can either be probability estimates of the positive class, confidence values, or non-thresholded measure of decisions (as returned by "decision_function" on some classifiers).
pos_label : int or str, default=None
    Label considered as positive; others are considered negative.
sample_weight : array-like of shape = [n_samples], optional
    Sample weights.
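
Each (fpr, tpr) point comes from thresholding the scores: fpr = fp/(fp+tn) and tpr = tp/(tp+fn). A minimal, illustrative sketch computing one ROC point at a given threshold (not the package's actual code):

// rocPoint computes (fpr, tpr) counting predictions with score >= threshold as positive.
func rocPoint(yTrue, scores []float64, posLabel, threshold float64) (fpr, tpr float64) {
	var tp, fp, tn, fn float64
	for i, s := range scores {
		positive := s >= threshold
		switch {
		case positive && yTrue[i] == posLabel:
			tp++
		case positive:
			fp++
		case yTrue[i] == posLabel:
			fn++
		default:
			tn++
		}
	}
	return fp / (fp + tn), tp / (tp + fn)
}

// With the example below, rocPoint(Y, scores, 2, 0.8) gives (0, 0.5), the first ROC point.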

Example

Code:

// example adapted from https://github.com/scikit-learn/scikit-learn/blob/a24c8b46/sklearn/metrics/ranking.py#L453
Y := mat.NewDense(4, 1, []float64{1, 1, 2, 2})
scores := mat.NewDense(4, 1, []float64{.1, .4, .35, .8})
fpr, tpr, thresholds := ROCCurve(Y, scores, 2., nil)
fmt.Println("fpr:", fpr)
fmt.Println("tpr:", tpr)
fmt.Println("thresholds:", thresholds)
Output:

fpr: [0 0.5 0.5 1]
tpr: [0.5 0.5 1 1]
thresholds: [0.8 0.4 0.35 0.1]

func RecallScore

func RecallScore(Ytrue, Ypred *mat.Dense, average string, sampleWeight []float64) float64

RecallScore computes the recall (see https://en.wikipedia.org/wiki/F1_score). average must be one of macro|micro|weighted. // TODO: binary, samples

Example

Code:

// adapted from example in https://github.com/scikit-learn/scikit-learn/blob/0.19.1/sklearn/metrics/classification.py
Ytrue, Ypred := mat.NewDense(6, 1, []float64{0, 1, 2, 0, 1, 2}), mat.NewDense(6, 1, []float64{0, 2, 1, 0, 0, 1})
var sampleWeight []float64
fmt.Printf("%.2f\n", RecallScore(Ytrue, Ypred, "macro", sampleWeight))
fmt.Printf("%.2f\n", RecallScore(Ytrue, Ypred, "micro", sampleWeight))
fmt.Printf("%.2f\n", RecallScore(Ytrue, Ypred, "weighted", sampleWeight))
Output:

0.33
0.33
0.33