prometheus

package
v0.0.0-...-aa2b1f7
Published: Sep 18, 2025 License: Apache-2.0 Imports: 17 Imported by: 0

Documentation

Constants

const (
	TIME_SECOND = int64(time.Second)
	TIME_MINUTE = 60 * TIME_SECOND
	TIME_HOUR   = 60 * TIME_MINUTE
	TIME_DAY    = TIME_HOUR * 24
)
const (
	TEMPLATE_GET_SERVICES_BY_FILTER      = `group by (svc_name) (increase(kindling_span_trace_duration_nanoseconds_count{%s}[%s])> 0)`
	TEMPLATE_GET_SERVICES                = `sum by(svc_name) (increase(kindling_span_trace_duration_nanoseconds_count{%s}[%s]))`
	TEMPLATE_GET_SERVICES_WITH_NAMESPACE = `sum by(svc_name, namespace) (increase(kindling_span_trace_duration_nanoseconds_count{%s}[%s]))`
	TEMPLATE_GET_ENDPOINTS               = `sum by(content_key) (increase(kindling_span_trace_duration_nanoseconds_count{%s}[%s]))`
	TEMPLATE_ERROR_RATE_INSTANCE         = "100*(" +
		"(sum by(%s)(increase(kindling_span_trace_duration_nanoseconds_count{%s, is_error='true'}[%s])) or 0)" +
		"/sum by(%s)(increase(kindling_span_trace_duration_nanoseconds_count{%s}[%s]))" +
		") or (sum by(%s)(increase(kindling_span_trace_duration_nanoseconds_count{%s}[%s])) * 0)" // or * 0补充中间缺失数据的场景
	TEMPLATE_GET_NAMESPACES            = `sum(kindling_span_trace_duration_nanoseconds_count{namespace=~".+"}[%s]) by (namespace)`
	TEMPLATE_GET_NAMESPACES_BY_SERVICE = `sum(kindling_span_trace_duration_nanoseconds_count{%s}[%s]) by (namespace)`

	TEMPLATE_GET_SERVICE_BY_DB = `` /* 306-byte string literal not displayed */

)
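
These templates are plain format strings; callers presumably substitute a label-filter string and a range vector for the %s placeholders. A minimal sketch of that substitution, with hypothetical filter and range values:

package prometheus

import "fmt"

// Example_fillServiceTemplate is a sketch of how these format-string templates
// are presumably filled: a label-filter string and a range vector replace the
// %s placeholders. The filter value and range below are hypothetical.
func Example_fillServiceTemplate() {
	filter := `namespace="train-ticket"` // hypothetical label filter
	query := fmt.Sprintf(TEMPLATE_GET_SERVICES, filter, "5m")
	fmt.Println(query)
	// Output:
	// sum by(svc_name) (increase(kindling_span_trace_duration_nanoseconds_count{namespace="train-ticket"}[5m]))
}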
const (
	ContentKeyPQLFilter     = "content_key="
	ServicePQLFilter        = "svc_name="
	NamespacePQLFilter      = "namespace="
	NamespaceRegexPQLFilter = "namespace=~"
	ContainerIdPQLFilter    = "container_id="
	IsErrorPQLFilter        = "is_error="
	PodPQLFilter            = "pod="
	PidPQLFilter            = "pid="
	NodeNamePQLFilter       = "node_name="
	ClusterIDPQLFilter      = "cluster_id="

	ContentKeyRegexPQLFilter = "content_key=~"
	ServiceRegexPQLFilter    = "svc_name=~"
	DBNameRegexPQLFilter     = "name=~"

	ValueExistPQLValueFilter = ".+"
	LabelExistPQLValueFilter = ".*"

	PodRegexPQLFilter           = "pod=~"
	LogMetricPodRegexPQLFilter  = "pod_name=~"
	LogMetricNodeRegexPQLFilter = "host_name=~"
	LogMetricPidRegexPQLFilter  = "pid=~"
	ContainerIdRegexPQLFilter   = "container_id=~"
	PidRegexPQLFilter           = "pid=~"

	ClusterIDKey   = "cluster_id"
	ServiceNameKey = "svc_name"
	ContentKeyKey  = "content_key"
	NamespaceKey   = "namespace"
)
const (
	// metricGroup
	REALTIME MGroupName = "realtime"
	AVG      MGroupName = "avg"
	DOD      MGroupName = "dod" // Day-over-Day Growth Rate
	WOW      MGroupName = "wow" // Week-over-Week Growth Rate

	// metricName
	DEP_LATENCY     MName = "dep_latency"
	LATENCY         MName = "latency"
	ERROR_RATE      MName = "error"
	THROUGHPUT      MName = "throughput"
	LOG_ERROR_COUNT MName = "log_error_count"
)
const (
	AVG_1MIN_ERROR_BY_SERVICE = `` /* 255-byte string literal not displayed */

	AVG_1MIN_ERROR = `` /* 223-byte string literal not displayed */

	AVG_1MIN_LATENCY_BY_SERVICE = `` /* 268-byte string literal not displayed */

	AVG_1MIN_LATENCY = `` /* 237-byte string literal not displayed */

	AVG_ERROR_BY_PID = `` /* 391-byte string literal not displayed */

	AVG_ERROR_BY_CONTAINERID = `` /* 425-byte string literal not displayed */

	AVG_ERROR_BY_POD = `` /* 423-byte string literal not displayed */

	AVG_ERROR_BY_SERVICE = `` /* 255-byte string literal not displayed */

	AVG_ERROR = `` /* 223-byte string literal not displayed */

	ERROR_DOD_BY_PID = `` /* 862-byte string literal not displayed */

	ERROR_DOD_BY_CONTAINERID = `` /* 906-byte string literal not displayed */

	ERROR_DOD_BY_POD = `` /* 906-byte string literal not displayed */

	ERROR_DOD_BY_SERVICE = `` /* 579-byte string literal not displayed */

	ERROR_DOD = `` /* 517-byte string literal not displayed */

	ERROR_WOW_BY_PID = `` /* 859-byte string literal not displayed */

	ERROR_WOW_BY_CONTAINERID = `` /* 903-byte string literal not displayed */

	ERROR_WOW_BY_POD = `` /* 903-byte string literal not displayed */

	ERROR_WOW_BY_SERVICE = `` /* 577-byte string literal not displayed */

	ERROR_WOW = `` /* 516-byte string literal not displayed */

	AVG_LATENCY_BY_PID = `` /* 364-byte string literal not displayed */

	AVG_LATENCY_BY_CONTAINERID = `` /* 394-byte string literal not displayed */

	AVG_LATENCY_BY_POD = `` /* 390-byte string literal not displayed */

	AVG_LATENCY_BY_SERVICE = `` /* 268-byte string literal not displayed */

	AVG_LATENCY = `` /* 237-byte string literal not displayed */

	LATENCY_DOD_BY_PID = `` /* 762-byte string literal not displayed */

	LATENCY_DOD_BY_CONTAINERID = `` /* 821-byte string literal not displayed */

	LATENCY_DOD_BY_POD = `` /* 818-byte string literal not displayed */

	LATENCY_DOD_BY_SERVICE = `` /* 569-byte string literal not displayed */

	LATENCY_DOD = `` /* 510-byte string literal not displayed */

	LATENCY_WOW_BY_PID = `` /* 758-byte string literal not displayed */

	LATENCY_WOW_BY_CONTAINERID = `` /* 803-byte string literal not displayed */

	LATENCY_WOW_BY_POD = `` /* 754-byte string literal not displayed */

	LATENCY_WOW_BY_SERVICE = `` /* 566-byte string literal not displayed */

	LATENCY_WOW = `` /* 506-byte string literal not displayed */

	AVG_TPS_BY_PID         = `` /* 179-byte string literal not displayed */
	AVG_TPS_BY_CONTAINERID = `` /* 195-byte string literal not displayed */
	AVG_TPS_BY_POD         = `` /* 194-byte string literal not displayed */
	AVG_TPS_BY_SERVICE     = `` /* 132-byte string literal not displayed */
	AVG_TPS                = `(sum by (content_key, svc_name) (increase(kindling_span_trace_duration_nanoseconds_count{content_key=~".*"}[%s])))/%s`
	TPS_DOD_BY_PID         = `` /* 700-byte string literal not displayed */

	TPS_DOD_BY_CONTAINERID = `` /* 743-byte string literal not displayed */

	TPS_DOD_BY_POD = `` /* 759-byte string literal not displayed */

	TPS_DOD_BY_SERVICE = `` /* 537-byte string literal not displayed */

	TPS_DOD = `` /* 494-byte string literal not displayed */

	TPS_WOW_BY_PID = `` /* 698-byte string literal not displayed */

	TPS_WOW_BY_CONTAINERID = `` /* 743-byte string literal not displayed */

	TPS_WOW_BY_POD = `` /* 746-byte string literal not displayed */

	TPS_WOW_BY_SERVICE = `` /* 535-byte string literal not displayed */

	TPS_WOW = `` /* 490-byte string literal not displayed */

	DELAY_SOURCE_BY_SERVICE = `` /* 1052-byte string literal not displayed */

	DELAY_SOURCE = `` /* 959-byte string literal not displayed */

	TPS_DATA = `
    (sum by (content_key, svc_name) (increase(kindling_span_trace_duration_nanoseconds_count{content_key=~"%s"}[%s])))/%s
`
	LATENCY_DATA = `` /* 238-byte string literal not displayed */

	ERROR_DATA = `` /* 259-byte string literal not displayed */

	TPS_DATA_BY_PID = `` /* 185-byte string literal not displayed */

	LATENCY_DATA_BY_PID = `` /* 364-byte string literal not displayed */

	ERROR_DATA_BY_PID = `` /* 350-byte string literal not displayed */

	TPS_DATA_BY_CONTAINERID = `` /* 196-byte string literal not displayed */

	LATENCY_DATA_BY_CONTAINERID = `` /* 386-byte string literal not displayed */

	ERROR_DATA_BY_CONTAINERID = `` /* 363-byte string literal not displayed */

	TPS_DATA_BY_POD = `` /* 183-byte string literal not displayed */

	LATENCY_DATA_BY_POD = `` /* 356-byte string literal not displayed */

	ERROR_DATA_BY_POD = `` /* 588-byte string literal not displayed */

)
const DefaultDepLatency int64 = -1
const (
	FillNodeName = `` /* 185-byte string literal not displayed */

)
const (
	MIDDLEWARE_CONNECT_COUNT = "apo_network_middleware_connect"
)
const RES_MAX_VALUE float64 = 9999999

RES_MAX_VALUE is the maximum value displayed by the front end. A period-over-period comparison equal to this value indicates the maximum.

const (
	TEMPLATE_GET_PODS_BY_FILTER = `group by(node_name, namespace, pod) (increase(originx_thread_polaris_nanoseconds_count{%s}[%s])> 0)`
)
const (
	TEMPLATE_LOG_SUM = `` /* 262-byte string literal not displayed */

)

Variables

var AlwaysFalseFilter = &AndFilter{Filters: []string{"apo_filter=\"never_match\""}}
var StrictPQL = false

VM extended syntax is used by default.
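
EnableStrictPQL presumably flips this switch; a minimal sketch, assuming that calling it makes subsequently generated selectors use plain Prometheus syntax rather than the VM extension:

package prometheus

// Example_strictPQL is a sketch. Assumption: EnableStrictPQL sets StrictPQL so
// that filters rendered afterwards avoid the VM-only extended selector syntax.
func Example_strictPQL() {
	EnableStrictPQL()

	f := EqualFilter(ServiceNameKey, "ts-order-service") // hypothetical service name
	_ = f.String()                                       // expected to emit a plain PromQL selector
}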

Functions

func AdjustREDValue

func AdjustREDValue(metricGroup MGroupName, metricName MName, value float64) float64

func EnableStrictPQL

func EnableStrictPQL()

func EscapeForLog

func EscapeForLog(s string) string

func EscapeRegexp

func EscapeRegexp(s string) string

func LogCountSeriesCombineSvcInfoWithPQLFilter

func LogCountSeriesCombineSvcInfoWithPQLFilter(rng string, gran string, filter PQLFilter, offset string) string

func PQLAvgDepLatencyWithFilters

func PQLAvgDepLatencyWithFilters(vector string, granularity string, filters []string) string

PQLAvgDepLatencyWithFilters queries the average time spent on external dependencies; the returned result is the average latency contributed by external dependencies.

func PQLAvgDepLatencyWithPQLFilter

func PQLAvgDepLatencyWithPQLFilter(rng string, gran string, filter PQLFilter, offset string) string

func PQLAvgErrorRateWithFilters

func PQLAvgErrorRateWithFilters(vector string, granularity string, filters []string) string

PQLAvgErrorRateWithFilters queries the average error rate of requests.

func PQLAvgErrorRateWithPQLFilter

func PQLAvgErrorRateWithPQLFilter(rng string, gran string, filter PQLFilter, offset string) string

PQLAvgErrorRateWithPQLFilter queries the average error rate of requests.

func PQLAvgLatencyWithFilters

func PQLAvgLatencyWithFilters(vector string, granularity string, filters []string) string

PQLAvgLatencyWithFilters queries the average latency.

func PQLAvgLatencyWithPQLFilter

func PQLAvgLatencyWithPQLFilter(rng string, gran string, filter PQLFilter, offset string) string

PQLAvgLatencyWithPQLFilter queries the average latency.

func PQLAvgLogErrorCountCombineEndpointsInfoWithFilters

func PQLAvgLogErrorCountCombineEndpointsInfoWithFilters(vector string, granularity string, filters []string) string

Uses `* on` to join logparser_level_count/logparser_exception_count with span_trace_duration_count.

It is mainly composed of the following exprs:

( logparser_level_count + span_trace_duration_count ) left_join on(pod) span_trace_duration_count
or
( logparser_level_count + span_trace_duration_count ) left_join on(node,pid) span_trace_duration_count

func PQLAvgLogErrorCountCombineEndpointsInfoWithPQLFilter

func PQLAvgLogErrorCountCombineEndpointsInfoWithPQLFilter(rng string, gran string, filter PQLFilter, offset string) string

WARNING: LogErrorCount entries without service information will not be returned.

func PQLAvgLogErrorCountWithFilters

func PQLAvgLogErrorCountWithFilters(vector string, granularity string, filters []string) string

func PQLAvgLogErrorCountWithPQLFilter

func PQLAvgLogErrorCountWithPQLFilter(rng string, gran string, filter PQLFilter, offset string) string

func PQLAvgSQLErrorRateWithFilters

func PQLAvgSQLErrorRateWithFilters(vector string, granularity string, filters []string) string

PQLAvgSQLErrorRateWithFilters queries the average error rate of SQL requests.

func PQLAvgSQLErrorRateWithPQLFilter

func PQLAvgSQLErrorRateWithPQLFilter(rng string, gran string, filter PQLFilter, offset string) string

PQLAvgSQLErrorRateWithPQLFilter queries the average error rate of SQL requests.

func PQLAvgSQLLatencyWithFilters

func PQLAvgSQLLatencyWithFilters(vector string, granularity string, filters []string) string

func PQLAvgSQLLatencyWithPQLFilter

func PQLAvgSQLLatencyWithPQLFilter(rng string, gran string, filter PQLFilter, offset string) string

func PQLAvgSQLTPSWithFilters

func PQLAvgSQLTPSWithFilters(vector string, granularity string, filters []string) string

PQLAvgSQLTPSWithFilters queries the average TPS of SQL requests.

func PQLAvgSQLTPSWithPQLFilter

func PQLAvgSQLTPSWithPQLFilter(rng string, gran string, filter PQLFilter, offset string) string

PQLAvgSQLTPSWithPQLFilter queries the average TPS of SQL requests.

func PQLAvgTPSWithFilters

func PQLAvgTPSWithFilters(vector string, granularity string, filters []string) string

PQLAvgTPSWithFilters queries the average TPS.

func PQLAvgTPSWithPQLFilter

func PQLAvgTPSWithPQLFilter(rng string, gran string, filter PQLFilter, offset string) string

PQLAvgTPSWithPQLFilter queries the average TPS.

func PQLDepLatencyRadioWithFilters

func PQLDepLatencyRadioWithFilters(vector string, granularity string, filters []string) string

PQLDepLatencyRadioWithFilters queries the share of time spent in external dependencies. The returned value is the ratio of externally dependent time to total time consumed (0~1).

func PQLDepLatencyRadioWithPQLFilter

func PQLDepLatencyRadioWithPQLFilter(rng string, gran string, filter PQLFilter, offset string) string

PQLDepLatencyRadioWithPQLFilter queries the share of time spent in external dependencies. The returned value is the ratio of externally dependent time to total time consumed (0~1).

func PQLInstanceLog

func PQLInstanceLog(pqlTemplate AggPQLWithFilters, startTime int64, endTime int64, granularity Granularity, podFilterKVs, vmFilterKVs []string) (string, error)

PQLInstanceLog builds the PQL for the instance-level log metric, for either pod or VM instances.

func PQLIsPolarisMetricExitsWithFilters

func PQLIsPolarisMetricExitsWithFilters(vector string, granularity string, filters []string) string

PQLIsPolarisMetricExitsWithFilters uses the onCPU time in the Polaris indicator to determine whether the Polaris metric exists.

func PQLMonitorStatus

func PQLMonitorStatus(vector string, granularity string, filters []string) string

PQLMonitorStatus queries the status of uptime-kuma monitoring items.

func PQLMonitorStatusWithPQLFilter

func PQLMonitorStatusWithPQLFilter(rng string, gran string, filter PQLFilter, offset string) string

func PQLNormalLogCountWithFilters

func PQLNormalLogCountWithFilters(vector string, granularity string, filters []string) string

PQLNormalLogCountWithFilters queries the count of normal logs.

func PQLNormalLogCountWithPQLFilter

func PQLNormalLogCountWithPQLFilter(rng string, gran string, filter PQLFilter, offset string) string

func PQLPolarisActiveSeries

func PQLPolarisActiveSeries(rng string, gran string, filters PQLFilter, offset string) string

PQLPolarisActiveSeries uses the onCPU time in the Polaris metrics to determine whether the Polaris metric exists.

func QueryContainerIdPromql

func QueryContainerIdPromql(duration string, queryType QueryType, serviceName string, contentKey string) string

func QueryContainerIdRangePromql

func QueryContainerIdRangePromql(duration string, queryType QueryType, contentKey string, serviceName string) string

func QueryEndPointPromql

func QueryEndPointPromql(duration string, queryType QueryType, serviceNames string) string

func QueryEndPointRangePromql

func QueryEndPointRangePromql(step string, duration string, queryType QueryType, contentKeys []string) string

func QueryLogByContainerIdPromql

func QueryLogByContainerIdPromql(duration string, queryType QueryType, containerIds []string) string

func QueryLogByPidPromql

func QueryLogByPidPromql(duration string, queryType QueryType, pids []string) string

func QueryLogPromql

func QueryLogPromql(duration string, queryType QueryType, pods []string) string

func QueryNodeName

func QueryNodeName(serviceName string, contentKey string) string

func QueryPidPromql

func QueryPidPromql(duration string, queryType QueryType, serviceName string, contentKey string) string

func QueryPidRangePromql

func QueryPidRangePromql(duration string, queryType QueryType, contentKey string, serviceName string) string

func QueryPodPromql

func QueryPodPromql(duration string, queryType QueryType, serviceName string, contentKey string) string

func QueryPodRangePromql

func QueryPodRangePromql(duration string, queryType QueryType, contentKey string, serviceName string) string

func RegexContainsValue

func RegexContainsValue(key string) string

RegexContainsValue creates a regular expression that matches a single target value; it is intended to be used with the xxxRegexPQLFilter constants.

func RegexMultipleValue

func RegexMultipleValue(key ...string) string

RegexMultipleValue creates a regular expression that matches multiple target values; it is intended to be used with the xxxRegexPQLFilter constants.
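
A sketch of pairing these regex helpers with the regex filter patterns defined above; the pod names are hypothetical, and the exact quoting AddPatternFilter expects is an assumption:

package prometheus

// Example_regexHelpers sketches combining RegexMultipleValue with a
// xxxRegexPQLFilter pattern. Whether the value needs extra quoting before it
// is passed to AddPatternFilter is an assumption.
func Example_regexHelpers() {
	f := NewFilter().
		Equal(NamespaceKey, "train-ticket").
		AddPatternFilter(PodRegexPQLFilter, RegexMultipleValue("ts-order-0", "ts-order-1"))
	_ = f.String()
}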

func ReverseSortWithMetrics

func ReverseSortWithMetrics(sortType request.SortType) func(i, j *EndpointMetrics) int

func VecFromDuration

func VecFromDuration(duration time.Duration) (vec string)

func VecFromS2E

func VecFromS2E(startTime int64, endTime int64) (vec string)

VecFromS2E derives the time range from the start and end timestamps, for use in built-in aggregation within PromQL queries.

e.g. avg (xxx[${vec}])
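
A sketch of using VecFromS2E to build the range vector for a template query; the timestamp unit it expects is an assumption (microseconds, matching MetricsPoint elsewhere in this package):

package prometheus

import (
	"fmt"
	"time"
)

// vecFromS2ESketch builds a query using VecFromS2E. The timestamp unit passed
// to VecFromS2E (microseconds here) is an assumption.
func vecFromS2ESketch() string {
	end := time.Now()
	start := end.Add(-15 * time.Minute)

	vec := VecFromS2E(start.UnixMicro(), end.UnixMicro())
	return fmt.Sprintf(TEMPLATE_GET_SERVICES, `namespace="train-ticket"`, vec)
}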

Types

type AggPQLWithFilters

type AggPQLWithFilters func(vector string, granularity string, filterKVs []string) string

AggPQLWithFilters generates a PQL statement from the vector and filterKVs. @vector: the aggregation time range. @granularity: the aggregation granularity. @filterKVs: filter conditions, in the format key1, value1, key2, value2.
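
A sketch of invoking one of the predefined templates through this type; the value quoting in the filterKVs pairs is an assumption:

package prometheus

// Example_aggPQLWithFilters sketches calling an AggPQLWithFilters template:
// vector, granularity, then alternating key/value filter pairs. Whether the
// values must carry their own quotes is an assumption.
func Example_aggPQLWithFilters() {
	var tpl AggPQLWithFilters = PQLAvgLatencyWithFilters

	pql := tpl(
		"5m",                        // @vector
		string(EndpointGranularity), // @granularity
		[]string{ServicePQLFilter, `"ts-order-service"`}, // @filterKVs: key1, value1, ...
	)
	_ = pql
}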

func DayOnDay

func DayOnDay(pqlTemplate AggPQLWithFilters) AggPQLWithFilters

DayOnDay calculates the day-over-day growth rate of the metric.

func WeekOnWeek

func WeekOnWeek(pqlTemplate AggPQLWithFilters) AggPQLWithFilters

WeekOnWeek calculates the week-over-week growth rate of the metric.
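
A sketch of wrapping a template with these decorators (same hypothetical filter pair as above):

package prometheus

// Example_growthRates sketches decorating a template so it returns growth
// rates instead of raw values. The filter values are hypothetical.
func Example_growthRates() {
	dod := DayOnDay(PQLAvgErrorRateWithFilters)
	wow := WeekOnWeek(PQLAvgErrorRateWithFilters)

	filterKVs := []string{ServicePQLFilter, `"ts-order-service"`}
	_ = dod("5m", string(SVCGranularity), filterKVs)
	_ = wow("5m", string(SVCGranularity), filterKVs)
}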

func WithDefaultIFPolarisMetricExits

func WithDefaultIFPolarisMetricExits(pqlTemplate AggPQLWithFilters, defaultValue int64) AggPQLWithFilters

type AndFilter

type AndFilter struct {
	Filters []string
}

func EqualFilter

func EqualFilter(k, v string) *AndFilter

EqualFilter is a shortcut for creating an AndFilter with a single equality condition.

func EqualIfNotEmptyFilter

func EqualIfNotEmptyFilter(k, v string) *AndFilter

func NewFilter

func NewFilter() *AndFilter

func NotEqualFilter

func NotEqualFilter(k, v string) *AndFilter

func PatternFilter

func PatternFilter(pattern, v string) *AndFilter

func RegexMatchFilter

func RegexMatchFilter(k, regexPattern string) *AndFilter

func RegexMatchIfNotEmptyFilter

func RegexMatchIfNotEmptyFilter(k, regexPattern string) *AndFilter

func (*AndFilter) AddPatternFilter

func (f *AndFilter) AddPatternFilter(pattern, v string) PQLFilter

func (*AndFilter) Clone

func (f *AndFilter) Clone() PQLFilter

func (*AndFilter) Equal

func (f *AndFilter) Equal(k, v string) PQLFilter

func (*AndFilter) EqualIfNotEmpty

func (f *AndFilter) EqualIfNotEmpty(k, v string) PQLFilter

func (*AndFilter) NotEqual

func (f *AndFilter) NotEqual(k, v string) PQLFilter

func (*AndFilter) RegexMatch

func (f *AndFilter) RegexMatch(k, regexPattern string) PQLFilter

func (*AndFilter) SplitFilters

func (f *AndFilter) SplitFilters(keys []string) (PQLFilter, PQLFilter)

func (*AndFilter) String

func (f *AndFilter) String() string

type ConvertFromLabels

type ConvertFromLabels interface {
	ConvertFromLabels(labels Labels) ConvertFromLabels
}

type DescendantMetrics

type DescendantMetrics struct {
	ServiceName string         `json:"serviceName"` // service name
	EndPoint    string         `json:"endpoint"`    // Endpoint
	LatencyP90  []MetricsPoint `json:"latencyP90"`  // P90 curve value
}

type EndpointKey

type EndpointKey struct {
	ContentKey string // URL
	SvcName    string // Name of the service to which the url belongs
}

func (EndpointKey) ConvertFromLabels

func (e EndpointKey) ConvertFromLabels(labels Labels) ConvertFromLabels

type EndpointMetrics

type EndpointMetrics struct {
	EndpointKey

	/*
		DelaySource shows the main source of the time cost (see the sketch after this type):
			[0,0.5): delay mainly from the service itself;
			[0.5,1]: delay mainly from its downstream;
			'nil': no network metric found, unable to analyze the major delay cause
	*/
	DelaySource   *float64
	AlertCount    int
	NamespaceList []string // Namespace containing the endpoint

	IsLatencyExceeded   bool
	IsErrorRateExceeded bool
	IsTPSExceeded       bool

	Avg1MinLatencyMutationRate float64 // latency mutation rate
	Avg1MinErrorMutationRate   float64 // error rate mutation rate

	REDMetrics REDMetrics

	LatencyData   []Points // latency time-series data
	ErrorRateData []Points // error-rate time-series data
	TPMData       []Points // TPM time-series data

	AvgLogErrorCount float64
}
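
For illustration, a hypothetical helper (not part of this package) that spells out the documented DelaySource ranges:

package prometheus

// describeDelaySource is a hypothetical helper restating the DelaySource
// ranges documented on EndpointMetrics; it is not part of this package.
func describeDelaySource(ds *float64) string {
	switch {
	case ds == nil:
		return "no network metric found, unable to analyze the major delay cause"
	case *ds < 0.5:
		return "delay mainly from the service itself"
	default:
		return "delay mainly from its downstream"
	}
}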

func (*EndpointMetrics) AppendGroupIfNotExist

func (e *EndpointMetrics) AppendGroupIfNotExist(_ MGroupName, metricName MName) bool

func (*EndpointMetrics) InitEmptyGroup

func (e *EndpointMetrics) InitEmptyGroup(key ConvertFromLabels) MetricGroup

func (*EndpointMetrics) SetValue

func (e *EndpointMetrics) SetValue(metricGroup MGroupName, metricName MName, value float64)

func (*EndpointMetrics) SetValues

func (e *EndpointMetrics) SetValues(_ MGroupName, metricName MName, points []Points)

type EndpointsMap

type EndpointsMap = MetricGroupMap[EndpointKey, *EndpointMetrics]

EndpointsMap stores the query results of multiple metrics at the same granularity; results are merged with MergeMetricResults.

func FetchEndpointsData

func FetchEndpointsData(
	ctx core.Context,
	promRepo Repo,
	filter PQLFilter,
	startTime, endTime time.Time,
	opts ...FetchEMOption) (*EndpointsMap, error)
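
A sketch of a typical call, assuming ctx and the Repo are obtained elsewhere (the core package's import path is not shown on this page and is elided); the filter, time window, and option set are hypothetical:

package prometheus

import "time"

// fetchEndpointsSketch is a sketch of FetchEndpointsData usage. The namespace
// value and the option combination are hypothetical; ctx's core package import
// is not displayed on this page and is elided here.
func fetchEndpointsSketch(ctx core.Context, promRepo Repo) (*EndpointsMap, error) {
	filter := EqualFilter(NamespaceKey, "train-ticket")
	endTime := time.Now()
	startTime := endTime.Add(-30 * time.Minute)

	return FetchEndpointsData(ctx, promRepo, filter, startTime, endTime,
		WithREDMetric(),   // presumably fills the realtime/avg/DOD/WOW RED metrics
		WithNamespace(),   // presumably fills NamespaceList
		WithDelaySource(), // presumably fills DelaySource
	)
}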

type FetchEMOption

type FetchEMOption func(
	ctx core.Context,
	promRepo Repo,
	em *EndpointsMap,
	startTime, endTime time.Time,
	filter PQLFilter,
) error

func WithDelaySource

func WithDelaySource() FetchEMOption

func WithLogErrorCount

func WithLogErrorCount() FetchEMOption

func WithNamespace

func WithNamespace() FetchEMOption

func WithREDChart

func WithREDChart(step time.Duration) FetchEMOption

func WithREDMetric

func WithREDMetric() FetchEMOption

func WithRealTimeREDMetric

func WithRealTimeREDMetric() FetchEMOption

type Granularity

type Granularity string
const (
	SVCGranularity              Granularity = "svc_name"
	EndpointGranularity         Granularity = "svc_name, content_key"
	NSEndpointGranularity       Granularity = "namespace, svc_name, content_key"
	InstanceEndpointGranularity Granularity = "svc_name, content_key, container_id, node_name, pid, pod, namespace, node_ip, cluster_id"
	InstanceGranularity         Granularity = "svc_name, container_id, node_name, pid, pod, namespace, node_ip, cluster_id"
	LogGranularity              Granularity = "pid,host_name,host_ip,container_id,pod_name,namespace"
	DBOperationGranularity      Granularity = "svc_name, db_system, db_name, name, db_url"

	DBInstanceGranularity Granularity = "db_url,container_id,node_name,pid,cluster_id"
)

type InstanceKey

type InstanceKey struct {
	ServiceName string `json:"service_name"`
	PID         string `json:"pid"`
	ContainerId string `json:"container_id"`
	Pod         string `json:"pod"`
	Namespace   string `json:"namespace"`
	NodeName    string `json:"node_name"`
	NodeIP      string `json:"node_ip"`
	ClusterID   string `json:"cluster_id"`
}

func (InstanceKey) ConvertFromLabels

func (i InstanceKey) ConvertFromLabels(labels Labels) ConvertFromLabels

func (InstanceKey) GenInstanceName

func (i InstanceKey) GenInstanceName() string

type InstanceMetrics

type InstanceMetrics struct {
	InstanceKey

	REDMetrics REDMetrics

	LogDayOverDay   *float64
	LogWeekOverWeek *float64
	LogAVGData      *float64

	LatencyData   []Points
	ErrorRateData []Points
	TPMData       []Points
	LogData       []Points
}

InstanceMetrics holds instance-granularity metric results.

func (*InstanceMetrics) AppendGroupIfNotExist

func (e *InstanceMetrics) AppendGroupIfNotExist(_ MGroupName, metricName MName) bool

func (*InstanceMetrics) InitEmptyGroup

func (e *InstanceMetrics) InitEmptyGroup(key ConvertFromLabels) MetricGroup

func (*InstanceMetrics) SetValue

func (e *InstanceMetrics) SetValue(metricGroup MGroupName, metricName MName, value float64)

func (*InstanceMetrics) SetValues

func (e *InstanceMetrics) SetValues(_ MGroupName, metricName MName, points []Points)

type Labels

type Labels struct {
	ContainerID string `json:"container_id"`
	ContentKey  string `json:"content_key"`
	Instance    string `json:"instance"`
	IsError     string `json:"is_error"`
	Job         string `json:"job"`
	NodeName    string `json:"node_name"`
	POD         string `json:"pod"`
	SvcName     string `json:"svc_name"`
	TopSpan     string `json:"top_span"`
	PID         string `json:"pid"`
	Namespace   string `json:"namespace"`
	ClusterID   string `json:"cluster_id"`
	NodeIP      string `json:"node_ip"`

	DBSystem string `json:"db_system"`
	DBName   string `json:"db_name"`
	// Name currently represents the operation section in SQL
	// e.g.: SELECT trip
	Name     string `json:"name"`
	DBUrl    string `json:"db_url"`
	PeerIP   string `json:"peer_ip"`
	PeerPort string `json:"peer_port"`

	MonitorName string `json:"monitor_name"`
}

func (*Labels) Extract

func (l *Labels) Extract(metric prommodel.Metric)

Extract extracts the required labels. Changes to the Labels fields need to be kept in sync.

func (*Labels) ExtractGran

func (l *Labels) ExtractGran(granularity []string, metric prommodel.LabelSet)

func (*Labels) SetValue

func (l *Labels) SetValue(name string, value string)

type MGroupName

type MGroupName string

MGroupName is the name of a metric group.

type MName

type MName string

MName is the name of a metric.

type Metric

type Metric string
const (
	PROFILING_EPOLL_DURATION_SUM Metric = "kindling_profiling_epoll_duration_nanoseconds_sum"
	PROFILING_NET_DURATION_SUM   Metric = "kindling_profiling_net_duration_nanoseconds_sum"
	PROFILING_CPU_DURATION_SUM   Metric = "kindling_profiling_cpu_duration_nanoseconds_sum"

	SPAN_TRACE_COUNT        Metric = "kindling_span_trace_duration_nanoseconds_count"
	SPAN_TRACE_DURATION_SUM Metric = "kindling_span_trace_duration_nanoseconds_sum"

	SPAN_DB_COUNT        Metric = "kindling_db_duration_nanoseconds_count"
	SPAN_DB_DURATION_SUM Metric = "kindling_db_duration_nanoseconds_sum"

	LOG_LEVEL_COUNT     Metric = "originx_logparser_level_count_total"
	LOG_EXCEPTION_COUNT Metric = "originx_logparser_exception_count_total"

	MONITOR_STATUS Metric = "monitor_status"
)

type MetricGroup

type MetricGroup interface {
	InitEmptyGroup(key ConvertFromLabels) MetricGroup
	AppendGroupIfNotExist(metricGroup MGroupName, metricName MName) bool
	SetValue(metricGroup MGroupName, metricName MName, value float64)
	SetValues(metricGroup MGroupName, metricName MName, points []Points)
}

type MetricGroupInterface

type MetricGroupInterface interface {
	MergeMetricResults(metricGroup MGroupName, metricName MName, metricResults []MetricResult)

	MergeRangeMetricResults(metricGroup MGroupName, metricName MName, metricResults []MetricResult)
}

type MetricGroupMap

type MetricGroupMap[K interface {
	comparable
	ConvertFromLabels
}, V MetricGroup] struct {
	// used to return a list
	MetricGroupList []V
	// Used to quickly look up the corresponding MetricGroup by key
	MetricGroupMap map[K]V
}

func (*MetricGroupMap[K, V]) MergeMetricResults

func (m *MetricGroupMap[K, V]) MergeMetricResults(metricGroup MGroupName, metricName MName, metricResults []MetricResult)

func (*MetricGroupMap[K, V]) MergeRangeMetricResults

func (m *MetricGroupMap[K, V]) MergeRangeMetricResults(metricGroup MGroupName, metricName MName, metricResults []MetricResult)

type MetricResult

type MetricResult struct {
	Metric Labels   `json:"metric"`
	Values []Points `json:"values"`
}

type MetricsPoint

type MetricsPoint struct {
	Timestamp int64   `json:"timestamp"` // time (microseconds)
	Value     float64 `json:"value"`     // value
}

type OrFilter

type OrFilter struct {
	Filters []AndFilter
}

func Or

func Or(filters ...PQLFilter) *OrFilter

func (*OrFilter) AddPatternFilter

func (f *OrFilter) AddPatternFilter(pattern, v string) PQLFilter

func (*OrFilter) Clone

func (o *OrFilter) Clone() PQLFilter

func (*OrFilter) Equal

func (f *OrFilter) Equal(k, v string) PQLFilter

func (*OrFilter) NotEqual

func (f *OrFilter) NotEqual(k, v string) PQLFilter

func (*OrFilter) RegexMatch

func (f *OrFilter) RegexMatch(k, regexPattern string) PQLFilter

func (*OrFilter) SplitFilters

func (o *OrFilter) SplitFilters(keys []string) (PQLFilter, PQLFilter)

func (*OrFilter) String

func (f *OrFilter) String() string

type P9xCondition

type P9xCondition struct {
	Key    string
	Values []string
}

type PQLFilter

type PQLFilter interface {
	// { $k='$v' }
	Equal(k, v string) PQLFilter
	// { $k!='$v' }
	NotEqual(k, v string) PQLFilter
	// { $k=~'$regexPattern' }
	RegexMatch(k, regexPattern string) PQLFilter
	// compatible with old predefined FilterPattern
	//
	// e.g. { $pattern$v }
	AddPatternFilter(pattern, v string) PQLFilter

	// HACK remove related filter directly
	SplitFilters(keys []string) (remain PQLFilter, removed PQLFilter)

	// return PQL using VM-extended syntax
	//
	// e.g. {a=1 or b=2}
	String() string
	// contains filtered or unexported methods
}

PQLFilter provides fine-grained filtering, primarily for data-grouping scenarios.

All conditions are expanded into DNF, which may cause a combinatorial explosion. Prefer a single regex match over OrFilter to reduce complexity.
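
A sketch of composing filters with this interface; the label values are hypothetical:

package prometheus

// Example_pqlFilter sketches composing filters. A single RegexMatch covering
// several values keeps the expression small, whereas Or-combined filters are
// expanded into DNF.
func Example_pqlFilter() {
	f := NewFilter().
		Equal(NamespaceKey, "train-ticket").
		RegexMatch(ServiceNameKey, RegexMultipleValue("ts-order-service", "ts-travel-service")).
		NotEqual("is_error", "true")
	_ = f.String()

	// The Or form of the same service match, shown only for contrast.
	orF := Or(
		EqualFilter(ServiceNameKey, "ts-order-service"),
		EqualFilter(ServiceNameKey, "ts-travel-service"),
	)
	_ = orF.String()
}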

func And

func And(filters ...PQLFilter) PQLFilter

func Clone

func Clone(filter PQLFilter) PQLFilter

type PQLTemplate

type PQLTemplate func(vector string, gran string, filter PQLFilter, offset string) string

func DayOnDayTemplate

func DayOnDayTemplate(template PQLTemplate) PQLTemplate

(a[rangeV] / a[rangeV] offset 24h)

func DayOnDayWithDef

func DayOnDayWithDef(template PQLTemplate, def int64) PQLTemplate

(a[rangeV] / a[rangeV] offset 24h) or (a[rangeV] * 0 + def)
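
A sketch of wrapping a PQLTemplate with this decorator; the default value and the empty offset argument are assumptions:

package prometheus

// Example_dayOnDayWithDef sketches decorating a PQLTemplate so that missing
// comparison data falls back to a default value. The default of 100 and the
// empty offset argument are assumptions.
func Example_dayOnDayWithDef() {
	tpl := DayOnDayWithDef(PQLAvgLatencyWithPQLFilter, 100)

	filter := EqualFilter(ServiceNameKey, "ts-order-service")
	_ = tpl("5m", string(SVCGranularity), filter, "")
}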

func PQLMetricSeries

func PQLMetricSeries(metric ...Metric) PQLTemplate

func WeekOnWeekTemplate

func WeekOnWeekTemplate(template PQLTemplate) PQLTemplate

func WeekOnWeekWithPQLFilter

func WeekOnWeekWithPQLFilter(template PQLTemplate, def int64) PQLTemplate

(a[rangeV] / a[rangeV] offset 7d) or (a[rangeV] * 0 + def)

func WithDefaultForPolarisActiveSeries

func WithDefaultForPolarisActiveSeries(template PQLTemplate, defaultValue int64) PQLTemplate

type Points

type Points struct {
	TimeStamp int64
	Value     float64
}

type QueryType

type QueryType int
const (
	AvgError QueryType = iota // average error rate
	ErrorDOD
	ErrorWOW
	ErrorData
	AvgLatency
	LatencyDOD
	LatencyWOW
	LatencyData
	AvgTPS
	TPSDOD
	TPSWOW
	TPSData
	DelaySource
	AvgLog
	LogDOD
	LogWOW
	LogNow
	LogYesterday
	LogLastWeek
	Avg1minError
	Avg1minLatency
)

TODO: Convert regular expressions

type QueryWithPQLFilter

type QueryWithPQLFilter interface {
	QueryMetricsWithPQLFilter(ctx core.Context, pqlTpl PQLTemplate, startTime int64, endTime int64, gran Granularity, filter PQLFilter) ([]MetricResult, error)
	QueryRangeMetricsWithPQLFilter(ctx core.Context, pqlTpl PQLTemplate, startTime int64, endTime int64, stepMicroS int64, gran Granularity, filter PQLFilter) ([]MetricResult, error)

	// QuerySeriesWithPQLFilter(ctx core.Context, startTime int64, endTime int64, filter PQLFilter, metric ...Metric) ([]prom_m.LabelSet, error)
	FillMetric(ctx core.Context, res MetricGroupInterface, metricGroup MGroupName, startTime, endTime time.Time, filter PQLFilter, granularity Granularity) error
	FillRangeMetric(ctx core.Context, res MetricGroupInterface, metricGroup MGroupName, startTime, endTime time.Time, step time.Duration, filter PQLFilter, granularity Granularity) error

	GetInstanceListByPQLFilter(ctx core.Context, startTime int64, endTime int64, filter PQLFilter) (*model.ServiceInstances, error)
	GetMultiSVCInstanceListByPQLFilter(ctx core.Context, startTime int64, endTime int64, filter PQLFilter) (map[string]*model.ServiceInstances, error)

	// Query the db instance for specified service
	GetDescendantDatabase(ctx core.Context, startTime int64, endTime int64, filter PQLFilter) ([]model.MiddlewareInstance, error)
}

type REDMetric

type REDMetric struct {
	Latency   *float64
	ErrorRate *float64
	TPM       *float64
}

func (*REDMetric) IsEmpty

func (m *REDMetric) IsEmpty() bool

type REDMetrics

type REDMetrics struct {
	Realtime REDMetric
	Avg      REDMetric
	DOD      REDMetric
	WOW      REDMetric
}

func (*REDMetrics) AppendGroupIfNotExist

func (m *REDMetrics) AppendGroupIfNotExist(_ MGroupName, metricName MName) bool

func (*REDMetrics) InitEmptyGroup

func (m *REDMetrics) InitEmptyGroup(_ ConvertFromLabels) MetricGroup

func (*REDMetrics) SetValue

func (m *REDMetrics) SetValue(metricGroup MGroupName, metricName MName, value float64)

func (*REDMetrics) SetValues

func (m *REDMetrics) SetValues(metricGroup MGroupName, metricName MName, values []Points)

type Repo

type Repo interface {
	// ========== span_trace_duration_bucket Start ==========
	// Query the P90 curve based on the service list, URL list, time period and step size.
	QueryRangePercentile(ctx core.Context, startTime int64, endTime int64, step int64, nodes *model.TopologyNodes) ([]DescendantMetrics, error)
	// Query the P90 delay curve of the instance
	QueryInstanceP90(ctx core.Context, startTime int64, endTime int64, step int64, endpoint string, instance *model.ServiceInstance) (map[int64]float64, error)

	// ========== span_trace_duration_count Start ==========
	// Query the service list
	GetServiceList(ctx core.Context, startTime int64, endTime int64, filter PQLFilter) ([]string, error)
	// Query the service list by filter rules
	GetServiceListByFilter(ctx core.Context, startTime time.Time, endTime time.Time, filterKVs ...string) ([]string, error)
	// Query the upstream service list by database URL, IP, and port
	GetServiceListByDatabase(ctx core.Context, startTime, endTime time.Time, dbURL, dbIP, dbPort string) ([]string, error)
	// Query services grouped by namespace.
	GetServiceWithNamespace(ctx core.Context, startTime, endTime int64, namespace []string) (map[string][]string, error)
	// GetServiceNamespace gets the namespaces of a service.
	GetServiceNamespace(ctx core.Context, startTime, endTime int64, service string) ([]string, error)
	// GetInstanceList query service instance list. URL can be empty.
	GetInstanceList(ctx core.Context, startTime int64, endTime int64, serviceName string, url string) (*model.ServiceInstances, error)
	// Query the list of active instances
	GetActiveInstanceList(ctx core.Context, startTime int64, endTime int64, clusterId string, serviceNames []string) (*model.ServiceInstances, error)
	// Query the service endpoint list. The service may be empty.
	GetServiceEndPointList(ctx core.Context, startTime int64, endTime int64, serviceName string) ([]string, error)
	// Query the service endpoint list. The service may be empty.
	GetServiceEndPointListByPQLFilter(ctx core.Context, startTime int64, endTime int64, filter PQLFilter) ([]string, error)
	// Query service instance failure rate
	QueryInstanceErrorRate(ctx core.Context, startTime int64, endTime int64, step int64, endpoint string, instance *model.ServiceInstance) (map[int64]float64, error)

	QueryData(ctx core.Context, searchTime time.Time, query string) ([]MetricResult, error)
	QueryRangeData(ctx core.Context, startTime time.Time, endTime time.Time, query string, step time.Duration) ([]MetricResult, error)
	QueryLatencyData(ctx core.Context, searchTime time.Time, query string) ([]MetricResult, error)
	QueryRangeLatencyData(ctx core.Context, startTime time.Time, endTime time.Time, query string, step time.Duration) ([]MetricResult, error)
	QueryErrorRateData(ctx core.Context, searchTime time.Time, query string) ([]MetricResult, error)
	QueryRangeErrorData(ctx core.Context, startTime time.Time, endTime time.Time, query string, step time.Duration) ([]MetricResult, error)

	// ========== originx_logparser_level_count_total Start ==========
	// Query the number of errors in the instance log
	QueryLogCountByInstanceId(ctx core.Context, instance *model.ServiceInstance, startTime int64, endTime int64, step int64) (map[int64]float64, error)
	// QueryInstanceLogRangeData query instance-level log graphs
	QueryInstanceLogRangeData(ctx core.Context, pqlTemplate AggPQLWithFilters, startTime int64, endTime int64, stepMicroS int64, granularity Granularity, podFilterKVs, vmFilterKVs []string) ([]MetricResult, error)

	// ========== db_duration_bucket Start ==========
	// Query the P90 curve based on the service list, URL list, time period and step size.
	QueryDbRangePercentile(ctx core.Context, startTime int64, endTime int64, step int64, nodes *model.TopologyNodes) ([]DescendantMetrics, error)

	// ========== external_duration_bucket Start ==========
	// Query the P90 curve based on the service list, URL list, time period and step size.
	QueryExternalRangePercentile(ctx core.Context, startTime int64, endTime int64, step int64, nodes *model.TopologyNodes) ([]DescendantMetrics, error)

	// ========== mq_duration_bucket Start ==========
	// Query the P90 curve based on the service list, URL list, time period and step size.
	QueryMqRangePercentile(ctx core.Context, startTime int64, endTime int64, step int64, nodes *model.TopologyNodes) ([]DescendantMetrics, error)

	QueryAggMetricsWithFilter(ctx core.Context, pqlTemplate AggPQLWithFilters, startTime int64, endTime int64, granularity Granularity, filterKVs ...string) ([]MetricResult, error)
	QueryRangeAggMetricsWithFilter(ctx core.Context, pqlTemplate AggPQLWithFilters, startTime int64, endTime int64, step int64, granularity Granularity, filterKVs ...string) ([]MetricResult, error)
	// originx_process_start_time
	QueryProcessStartTime(ctx core.Context, startTime time.Time, endTime time.Time, instances []*model.ServiceInstance) (map[model.ServiceInstance]int64, error)
	GetApi() v1.API
	GetRange() string

	LabelValues(ctx core.Context, expr string, label string, startTime, endTime int64) (prommodel.LabelValues, error)
	QueryResult(ctx core.Context, expr string, regex string, startTime, endTime int64) ([]string, error)

	GetNamespaceList(ctx core.Context, startTime int64, endTime int64, filter PQLFilter) ([]string, error)
	GetNamespaceWithService(ctx core.Context, startTime, endTime int64) (map[string][]string, error)

	GetPodList(ctx core.Context, startTime int64, endTime int64, nodeName string, namespace string, podName string) ([]*model.Pod, error)
	QueryWithPQLFilter
}

func New

func New(
	logger *zap.Logger,
	address string,
	storage string) (Repo, error)
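
A sketch of constructing a Repo; the address and the accepted values of the storage argument are assumptions, only the signature of New comes from this page:

package prometheus

import "go.uber.org/zap"

// newRepoSketch constructs a Repo. The address and the "vm" storage value are
// assumptions; only New's signature is taken from this package.
func newRepoSketch() (Repo, error) {
	return New(zap.NewNop(), "http://victoria-metrics:8428", "vm")
}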

type SQLKey

type SQLKey struct {
	Service string `json:"service"`
	// DBSystem -> ${SQL Type}, e.g: Mysql
	DBSystem string `json:"dbSystem"`
	// DBName -> ${database}
	DBName string `json:"dbName"`
	// DBOperation -> ${operation} ${table}, e.g: SELECT trip
	DBOperation string `json:"dbOperation"`
	DBUrl       string `json:"dbUrl"`
}

func (SQLKey) ConvertFromLabels

func (k SQLKey) ConvertFromLabels(labels Labels) ConvertFromLabels

type ServiceEndpointMetrics

type ServiceEndpointMetrics struct {
	EndpointKey

	REDMetrics    REDMetrics
	LatencyData   []Points
	ErrorRateData []Points
	TPMData       []Points
}

func (*ServiceEndpointMetrics) AppendGroupIfNotExist

func (e *ServiceEndpointMetrics) AppendGroupIfNotExist(_ MGroupName, metricName MName) bool

func (*ServiceEndpointMetrics) InitEmptyGroup

func (e *ServiceEndpointMetrics) InitEmptyGroup(key ConvertFromLabels) MetricGroup

func (*ServiceEndpointMetrics) SetValue

func (e *ServiceEndpointMetrics) SetValue(metricGroup MGroupName, metricName MName, value float64)

func (*ServiceEndpointMetrics) SetValues

func (e *ServiceEndpointMetrics) SetValues(_ MGroupName, metricName MName, points []Points)

type ServiceKey

type ServiceKey struct {
	SvcName string // Name of the service to which the url belongs
}

func (ServiceKey) ConvertFromLabels

func (S ServiceKey) ConvertFromLabels(labels Labels) ConvertFromLabels

type ServiceMetrics

type ServiceMetrics struct {
	ServiceKey

	REDMetrics    REDMetrics
	LatencyData   []Points
	ErrorRateData []Points
	TPMData       []Points
}

func (*ServiceMetrics) AppendGroupIfNotExist

func (e *ServiceMetrics) AppendGroupIfNotExist(_ MGroupName, metricName MName) bool

func (*ServiceMetrics) InitEmptyGroup

func (e *ServiceMetrics) InitEmptyGroup(key ConvertFromLabels) MetricGroup

func (*ServiceMetrics) SetValue

func (e *ServiceMetrics) SetValue(metricGroup MGroupName, metricName MName, value float64)

func (*ServiceMetrics) SetValues

func (e *ServiceMetrics) SetValues(_ MGroupName, metricName MName, points []Points)

type UnionP9xBuilder

type UnionP9xBuilder struct {
	// contains filtered or unexported fields
}

func NewUnionP9xBuilder

func NewUnionP9xBuilder(value string, tableName string, labels []string, duration time.Duration) *UnionP9xBuilder

func (*UnionP9xBuilder) AddCondition

func (p9x *UnionP9xBuilder) AddCondition(key string, values []string) error

func (*UnionP9xBuilder) AddExtraCondition

func (p9x *UnionP9xBuilder) AddExtraCondition(condition string)

func (*UnionP9xBuilder) ToString

func (p9x *UnionP9xBuilder) ToString() string
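
A sketch of building a P9x query with this builder; the quantile value, the bucket metric (table) name, and the label set are assumptions:

package prometheus

import "time"

// unionP9xSketch builds a P90 query. The quantile value, bucket metric name,
// and label set are assumptions; only the builder's signatures come from this
// package.
func unionP9xSketch() (string, error) {
	builder := NewUnionP9xBuilder(
		"0.9",
		"kindling_span_trace_duration_nanoseconds_bucket", // assumed bucket metric name
		[]string{"svc_name", "content_key"},
		5*time.Minute,
	)
	if err := builder.AddCondition("svc_name", []string{"ts-order-service"}); err != nil {
		return "", err
	}
	return builder.ToString(), nil
}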

type WrappedApi

type WrappedApi struct {
	v1.API
	// contains filtered or unexported fields
}

func (*WrappedApi) Query

func (api *WrappedApi) Query(ctx context.Context, query string, ts time.Time, opts ...v1.Option) (model.Value, v1.Warnings, error)

func (*WrappedApi) QueryRange

func (api *WrappedApi) QueryRange(ctx context.Context, query string, r v1.Range, opts ...v1.Option) (model.Value, v1.Warnings, error)
