View Source
const (
	// RequestTimeout is the default cloud request timeout
	RequestTimeout = 20 * time.Second
	// RetryInterval is the default cloud request retry interval
	RetryInterval = 500 * time.Millisecond
	// MaxRetries specifies max retry attempts
	MaxRetries = 3
View Source
const (
	DataTypeSingle             = "Point"
	DataTypeMap                = "Points"
	DataTypeAggregatedHTTPReqs = "AggregatedPoints"

    DataType constants

    View Source
    const TestName = "k6 test"

      TestName is the default Load Impact Cloud test name


      View Source
      var (
      	ErrNotAuthorized    = errors.New("Not allowed to upload result to Load Impact cloud")
      	ErrNotAuthenticated = errors.New("Failed to authenticate with Load Impact cloud")
      	ErrUnknown          = errors.New("An error occurred talking to Load Impact cloud")


      func MergeFromExternal

      func MergeFromExternal(external map[string]json.RawMessage, conf *Config) error

        MergeFromExternal merges three fields from JSON in a loadimpact key of the provided external map

        func URLForResults

        func URLForResults(refID string, config Config) string


        type AggregatedMetric

        type AggregatedMetric struct {
        	// Updated by Calc() and used in the JSON output
        	Min float64 `json:"min"`
        	Max float64 `json:"max"`
        	Avg float64 `json:"avg"`
        	// contains filtered or unexported fields

          AggregatedMetric is used to store aggregated information for a particular metric in a SampleDataAggregatedHTTPReqs sample.

          func (*AggregatedMetric) Add

          func (am *AggregatedMetric) Add(t time.Duration)

            Add the new duration to the internal sum and update Min and Max if necessary

            func (*AggregatedMetric) Calc

            func (am *AggregatedMetric) Calc(count float64)

              Calc populates the float fields for min and max and calculates the average value

              type Client

              type Client struct {
              	// contains filtered or unexported fields

                Client handles communication with Load Impact cloud API.

                func NewClient

                func NewClient(logger logrus.FieldLogger, token, host, version string) *Client

                  NewClient returns a new client for the cloud API

                  func (*Client) CreateTestRun

                  func (c *Client) CreateTestRun(testRun *TestRun) (*CreateTestRunResponse, error)

                  func (*Client) Do

                  func (c *Client) Do(req *http.Request, v interface{}) error

                  func (*Client) GetTestProgress

                  func (c *Client) GetTestProgress(referenceID string) (*TestProgressResponse, error)

                  func (*Client) Login

                  func (c *Client) Login(email string, password string) (*LoginResponse, error)

                  func (*Client) NewRequest

                  func (c *Client) NewRequest(method, url string, data interface{}) (*http.Request, error)

                    NewRequest creates new HTTP request.

                    This is the same as http.NewRequest, except that if data is not nil it will be serialized in JSON format.

                    func (*Client) PushMetric

                    func (c *Client) PushMetric(referenceID string, noCompress bool, s []*Sample) error

                      PushMetric pushes the provided metric samples for the given referenceID

                      func (*Client) StartCloudTestRun

                      func (c *Client) StartCloudTestRun(name string, projectID int64, arc *lib.Archive) (string, error)

                      func (*Client) StopCloudTestRun

                      func (c *Client) StopCloudTestRun(referenceID string) error

                      func (*Client) TestFinished

                      func (c *Client) TestFinished(referenceID string, thresholds ThresholdResult, tained bool, runStatus lib.RunStatus) error

                      func (*Client) ValidateOptions

                      func (c *Client) ValidateOptions(options lib.Options) error

                      type Collector

                      type Collector struct {
                      	// contains filtered or unexported fields

                        Collector sends result data to the Load Impact cloud service.

                        func New

                        func New(
                        	logger logrus.FieldLogger,
                        	conf Config, src *loader.SourceData, opts lib.Options, executionPlan []lib.ExecutionStep, version string,
                        ) (*Collector, error)

                          New creates a new cloud collector

                          func (*Collector) Collect

                          func (c *Collector) Collect(sampleContainers []stats.SampleContainer)

                            Collect receives a set of samples. This method is never called concurrently, and only while the context for Run() is valid, but should defer as much work as possible to Run().

                            func (*Collector) GetRequiredSystemTags

                            func (c *Collector) GetRequiredSystemTags() stats.SystemTagSet

                              GetRequiredSystemTags returns which sample tags are needed by this collector

                              func (*Collector) Init

                              func (c *Collector) Init() error

                                Init is called between the collector's creation and the call to Run(). You should do any lengthy setup here rather than in New.

                                func (c *Collector) Link() string

                                    Link returns a link that is shown to the user.

                                  func (*Collector) Run

                                  func (c *Collector) Run(ctx context.Context)

                                    Run is called in a goroutine and starts the collector. Should commit samples to the backend at regular intervals and when the context is terminated.

                                    func (*Collector) SetRunStatus

                                    func (c *Collector) SetRunStatus(status lib.RunStatus)

                                      SetRunStatus sets the run status.

                                      type Config

                                      type Config struct {
                                      	// TODO: refactor common stuff between cloud execution and output
                                      	Token           null.String `json:"token" envconfig:"K6_CLOUD_TOKEN"`
                                      	DeprecatedToken null.String `json:"-" envconfig:"K6CLOUD_TOKEN"`
                                      	ProjectID       null.Int    `json:"projectID" envconfig:"K6_CLOUD_PROJECT_ID"`
                                      	Name            null.String `json:"name" envconfig:"K6_CLOUD_NAME"`
                                      	Host        null.String `json:"host" envconfig:"K6_CLOUD_HOST"`
                                      	LogsTailURL null.String `json:"-" envconfig:"K6_CLOUD_LOGS_TAIL_URL"`
                                      	PushRefID   null.String `json:"pushRefID" envconfig:"K6_CLOUD_PUSH_REF_ID"`
                                      	WebAppURL   null.String `json:"webAppURL" envconfig:"K6_CLOUD_WEB_APP_URL"`
                                      	NoCompress  null.Bool   `json:"noCompress" envconfig:"K6_CLOUD_NO_COMPRESS"`
                                      	MaxMetricSamplesPerPackage null.Int `json:"maxMetricSamplesPerPackage" envconfig:"K6_CLOUD_MAX_METRIC_SAMPLES_PER_PACKAGE"`
                                      	// The time interval between periodic API calls for sending samples to the cloud ingest service.
                                      	MetricPushInterval types.NullDuration `json:"metricPushInterval" envconfig:"K6_CLOUD_METRIC_PUSH_INTERVAL"`
                                      	// This is how many concurrent pushes will be done at the same time to the cloud
                                      	MetricPushConcurrency null.Int `json:"metricPushConcurrency" envconfig:"K6_CLOUD_METRIC_PUSH_CONCURRENCY"`
                                      	// If specified and is greater than 0, sample aggregation with that period is enabled
                                      	AggregationPeriod types.NullDuration `json:"aggregationPeriod" envconfig:"K6_CLOUD_AGGREGATION_PERIOD"`
                                      	// If aggregation is enabled, this is how often new HTTP trails will be sorted into buckets and sub-buckets and aggregated.
                                      	AggregationCalcInterval types.NullDuration `json:"aggregationCalcInterval" envconfig:"K6_CLOUD_AGGREGATION_CALC_INTERVAL"`
                                      	// If aggregation is enabled, this specifies how long we'll wait for period samples to accumulate before trying to aggregate them.
                                      	AggregationWaitPeriod types.NullDuration `json:"aggregationWaitPeriod" envconfig:"K6_CLOUD_AGGREGATION_WAIT_PERIOD"`
                                      	// If aggregation is enabled, but the collected samples for a certain AggregationPeriod after AggregationPushDelay has passed are less than this number, they won't be aggregated.
                                      	AggregationMinSamples null.Int `json:"aggregationMinSamples" envconfig:"K6_CLOUD_AGGREGATION_MIN_SAMPLES"`
                                      	// If this is enabled and a sub-bucket has more than AggregationMinSamples HTTP trails in it, they would all be
                                      	// aggregated without attempting to find and separate any outlier metrics first.
                                      	// IMPORTANT: This is intended for testing purposes only or, in extreme cases, when the result precision
                                      	// isn't very important and the improved aggregation percentage would be worth the potentially huge loss
                                      	// of metric granularity and possible masking of any outlier samples.
                                      	AggregationSkipOutlierDetection null.Bool `json:"aggregationSkipOutlierDetection" envconfig:"K6_CLOUD_AGGREGATION_SKIP_OUTLIER_DETECTION"`
                                      	// If aggregation and outlier detection are enabled, this option specifies the
                                      	// number of HTTP trails in a sub-bucket that determine which quartile-calculating
                                      	// algorithm would be used:
                                      	// - for fewer samples (between MinSamples and OutlierAlgoThreshold), a more precise
                                      	//   (i.e. supporting interpolation), but also more computationally-heavy sorting
                                      	//   algorithm will be used to find the quartiles.
                                      	// - if there are more samples than OutlierAlgoThreshold in the sub-bucket, a
                                      	//   QuickSelect-based (https://en.wikipedia.org/wiki/Quickselect) algorithm will
                                      	//   be used. It doesn't support interpolation, so there's a small loss of precision
                                      	//   in the outlier detection, but it's not as resource-heavy as the sorting algorithm.
                                      	AggregationOutlierAlgoThreshold null.Int `json:"aggregationOutlierAlgoThreshold" envconfig:"K6_CLOUD_AGGREGATION_OUTLIER_ALGO_THRESHOLD"`
                                      	// The radius (as a fraction) from the median at which to sample Q1 and Q3.
                                      	// By default it's one quarter (0.25) and if set to something different, the Q in IQR
                                      	// won't make much sense... But this would allow us to select tighter sample groups for
                                      	// aggregation if we want.
                                      	AggregationOutlierIqrRadius null.Float `json:"aggregationOutlierIqrRadius" envconfig:"K6_CLOUD_AGGREGATION_OUTLIER_IQR_RADIUS"`
                                      	// Connection or request times with how many IQRs below Q1 to consider as non-aggregatable outliers.
                                      	AggregationOutlierIqrCoefLower null.Float `json:"aggregationOutlierIqrCoefLower" envconfig:"K6_CLOUD_AGGREGATION_OUTLIER_IQR_COEF_LOWER"`
                                      	// Connection or request times with how many IQRs above Q3 to consider as non-aggregatable outliers.
                                      	AggregationOutlierIqrCoefUpper null.Float `json:"aggregationOutlierIqrCoefUpper" envconfig:"K6_CLOUD_AGGREGATION_OUTLIER_IQR_COEF_UPPER"`

                                        Config holds all the necessary data and options for sending metrics to the Load Impact cloud. nolint: lll

                                        func NewConfig

                                        func NewConfig() Config

                                          NewConfig creates a new Config instance with default values for some fields.

                                          func (Config) Apply

                                          func (c Config) Apply(cfg Config) Config

                                            Apply saves the non-zero config values from the passed config in the receiver.

                                            func (*Config) StreamLogsToLogger

                                            func (c *Config) StreamLogsToLogger(
                                            	ctx context.Context, logger logrus.FieldLogger, referenceID string, start time.Duration,
                                            ) error

                                              StreamLogsToLogger streams the logs for the configured test to the provided logger until ctx is Done or an error occurs.

                                              type CreateTestRunResponse

                                              type CreateTestRunResponse struct {
                                              	ReferenceID    string  `json:"reference_id"`
                                              	ConfigOverride *Config `json:"config"`

                                              type ErrorResponse

                                              type ErrorResponse struct {
                                              	Response *http.Response `json:"-"`
                                              	Code        int                 `json:"code"`
                                              	Message     string              `json:"message"`
                                              	Details     map[string][]string `json:"details"`
                                              	FieldErrors map[string][]string `json:"field_errors"`
                                              	Errors      []string            `json:"errors"`

                                                 ErrorResponse represents an error caused by talking to the API

                                                func (ErrorResponse) Error

                                                func (e ErrorResponse) Error() string

                                                type LoginResponse

                                                type LoginResponse struct {
                                                	Token string `json:"token"`

                                                type ResultStatus

                                                type ResultStatus int
                                                const (
                                                	ResultStatusPassed ResultStatus = 0
                                                	ResultStatusFailed ResultStatus = 1

                                                type Sample

                                                type Sample struct {
                                                	Type   string      `json:"type"`
                                                	Metric string      `json:"metric"`
                                                	Data   interface{} `json:"data"`

                                                  Sample is the generic struct that contains all types of data that we send to the cloud. easyjson:json

                                                  func NewSampleFromTrail

                                                  func NewSampleFromTrail(trail *httpext.Trail) *Sample

                                                    NewSampleFromTrail just creates a ready-to-send Sample instance directly from a httpext.Trail.

                                                    func (Sample) MarshalEasyJSON

                                                    func (v Sample) MarshalEasyJSON(w *jwriter.Writer)

                                                      MarshalEasyJSON supports easyjson.Marshaler interface

                                                      func (*Sample) UnmarshalEasyJSON

                                                      func (v *Sample) UnmarshalEasyJSON(l *jlexer.Lexer)

                                                        UnmarshalEasyJSON supports easyjson.Unmarshaler interface

                                                        func (*Sample) UnmarshalJSON

                                                        func (ct *Sample) UnmarshalJSON(p []byte) error

                                                          UnmarshalJSON decodes the Data into the corresponding struct

                                                          type SampleDataAggregatedHTTPReqs

                                                          type SampleDataAggregatedHTTPReqs struct {
                                                          	Time   int64             `json:"time,string"`
                                                          	Type   string            `json:"type"`
                                                          	Count  uint64            `json:"count"`
                                                          	Tags   *stats.SampleTags `json:"tags,omitempty"`
                                                          	Values struct {
                                                          		Duration       AggregatedMetric `json:"http_req_duration"`
                                                          		Blocked        AggregatedMetric `json:"http_req_blocked"`
                                                          		Connecting     AggregatedMetric `json:"http_req_connecting"`
                                                          		TLSHandshaking AggregatedMetric `json:"http_req_tls_handshaking"`
                                                          		Sending        AggregatedMetric `json:"http_req_sending"`
                                                          		Waiting        AggregatedMetric `json:"http_req_waiting"`
                                                          		Receiving      AggregatedMetric `json:"http_req_receiving"`
                                                          	} `json:"values"`

                                                            SampleDataAggregatedHTTPReqs is used in aggregated samples for HTTP requests. easyjson:json

                                                            func (*SampleDataAggregatedHTTPReqs) Add

                                                            func (sdagg *SampleDataAggregatedHTTPReqs) Add(trail *httpext.Trail)

                                                              Add updates all aggregated values with the supplied trail data

                                                              func (*SampleDataAggregatedHTTPReqs) CalcAverages

                                                              func (sdagg *SampleDataAggregatedHTTPReqs) CalcAverages()

                                                                CalcAverages calculates and sets all `Avg` properties in the `Values` struct

                                                                func (SampleDataAggregatedHTTPReqs) MarshalEasyJSON

                                                                func (v SampleDataAggregatedHTTPReqs) MarshalEasyJSON(w *jwriter.Writer)

                                                                  MarshalEasyJSON supports easyjson.Marshaler interface

                                                                  func (*SampleDataAggregatedHTTPReqs) UnmarshalEasyJSON

                                                                  func (v *SampleDataAggregatedHTTPReqs) UnmarshalEasyJSON(l *jlexer.Lexer)

                                                                    UnmarshalEasyJSON supports easyjson.Unmarshaler interface

                                                                    type SampleDataMap

                                                                    type SampleDataMap struct {
                                                                    	Time   int64              `json:"time,string"`
                                                                    	Type   stats.MetricType   `json:"type"`
                                                                    	Tags   *stats.SampleTags  `json:"tags,omitempty"`
                                                                    	Values map[string]float64 `json:"values,omitempty"`

                                                                      SampleDataMap is used by samples that contain multiple values, currently that's only iteration metrics (`iter_li_all`) and unaggregated HTTP requests (`http_req_li_all`). easyjson:json

                                                                      func (SampleDataMap) MarshalEasyJSON

                                                                      func (v SampleDataMap) MarshalEasyJSON(w *jwriter.Writer)

                                                                        MarshalEasyJSON supports easyjson.Marshaler interface

                                                                        func (*SampleDataMap) UnmarshalEasyJSON

                                                                        func (v *SampleDataMap) UnmarshalEasyJSON(l *jlexer.Lexer)

                                                                          UnmarshalEasyJSON supports easyjson.Unmarshaler interface

                                                                          type SampleDataSingle

                                                                          type SampleDataSingle struct {
                                                                          	Time  int64             `json:"time,string"`
                                                                          	Type  stats.MetricType  `json:"type"`
                                                                          	Tags  *stats.SampleTags `json:"tags,omitempty"`
                                                                          	Value float64           `json:"value"`

                                                                            SampleDataSingle is used in all simple un-aggregated single-value samples. easyjson:json

                                                                            func (SampleDataSingle) MarshalEasyJSON

                                                                            func (v SampleDataSingle) MarshalEasyJSON(w *jwriter.Writer)

                                                                              MarshalEasyJSON supports easyjson.Marshaler interface

                                                                              func (*SampleDataSingle) UnmarshalEasyJSON

                                                                              func (v *SampleDataSingle) UnmarshalEasyJSON(l *jlexer.Lexer)

                                                                                UnmarshalEasyJSON supports easyjson.Unmarshaler interface

                                                                                type TestProgressResponse

                                                                                type TestProgressResponse struct {
                                                                                	RunStatusText string        `json:"run_status_text"`
                                                                                	RunStatus     lib.RunStatus `json:"run_status"`
                                                                                	ResultStatus  ResultStatus  `json:"result_status"`
                                                                                	Progress      float64       `json:"progress"`

                                                                                type TestRun

                                                                                type TestRun struct {
                                                                                	Name       string              `json:"name"`
                                                                                	ProjectID  int64               `json:"project_id,omitempty"`
                                                                                	VUsMax     int64               `json:"vus"`
                                                                                	Thresholds map[string][]string `json:"thresholds"`
                                                                                	// Duration of test in seconds. -1 for unknown length, 0 for continuous running.
                                                                                	Duration int64 `json:"duration"`

                                                                                type ThresholdResult

                                                                                type ThresholdResult map[string]map[string]bool