config

package module
v0.74.1 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Jan 9, 2026 License: Apache-2.0 Imports: 20 Imported by: 14

Documentation

Overview

Package config provides log configuration structures and utilities.

Index

Constants

View Source
const (
	GzipCompressionKind  = "gzip"
	GzipCompressionLevel = 6
	ZstdCompressionKind  = "zstd"
	ZstdCompressionLevel = 1
)

CompressionKind constants

View Source
const (
	TCPType           = "tcp"
	UDPType           = "udp"
	FileType          = "file"
	DockerType        = "docker"
	ContainerdType    = "containerd"
	JournaldType      = "journald"
	IntegrationType   = "integration"
	WindowsEventType  = "windows_event"
	StringChannelType = "string_channel"

	// UTF16BE for UTF-16 Big endian encoding
	UTF16BE string = "utf-16-be"
	// UTF16LE for UTF-16 Little Endian encoding
	UTF16LE string = "utf-16-le"
	// SHIFTJIS for Shift JIS (Japanese) encoding
	SHIFTJIS string = "shift-jis"
)

Logs source types

View Source
const (
	ForceBeginning = iota
	ForceEnd
	Beginning
	End
)

Tailing Modes

View Source
const (
	ExcludeAtMatch   = "exclude_at_match"
	IncludeAtMatch   = "include_at_match"
	MaskSequences    = "mask_sequences"
	MultiLine        = "multi_line"
	ExcludeTruncated = "exclude_truncated"
)

Processing rule types

View Source
const AgentJSONIntakeProtocol = "agent-json"

AgentJSONIntakeProtocol agent json protocol

View Source
const (
	// DateFormat is the default date format.
	DateFormat = "2006-01-02T15:04:05.000000000Z"
)
View Source
const EmptyPathPrefix = ""

EmptyPathPrefix is the default path prefix for the endpoint.

Variables

View Source
var ErrEmptyFingerprintConfig = errors.New("fingerprint config is empty - no fields are set")

ErrEmptyFingerprintConfig is returned when a fingerprint config is empty

Functions

func AggregationTimeout

func AggregationTimeout(coreConfig pkgconfigmodel.Reader) time.Duration

AggregationTimeout is used when performing aggregation operations

func CompileProcessingRules

func CompileProcessingRules(rules []*ProcessingRule) error

CompileProcessingRules compiles all processing rule regular expressions.

func ContainsWildcard

func ContainsWildcard(path string) bool

ContainsWildcard returns true if the path contains any wildcard character

func ExpectedTagsDuration

func ExpectedTagsDuration(coreConfig pkgconfigmodel.Reader) time.Duration

ExpectedTagsDuration returns a duration of the time expected tags will be submitted for.

func GlobalFingerprintConfig added in v0.71.0

func GlobalFingerprintConfig(coreConfig pkgconfigmodel.Reader) (*types.FingerprintConfig, error)

GlobalFingerprintConfig returns the global fingerprint configuration to apply to all logs.

func HasMultiLineRule

func HasMultiLineRule(rules []*ProcessingRule) bool

HasMultiLineRule returns true if the rule set contains a multi_line rule

func IsExpectedTagsSet

func IsExpectedTagsSet(coreConfig pkgconfigmodel.Reader) bool

IsExpectedTagsSet returns boolean showing if expected tags feature is enabled.

func MaxMessageSizeBytes

func MaxMessageSizeBytes(coreConfig pkgconfigmodel.Reader) int

MaxMessageSizeBytes is used to cap the maximum log message size in bytes

func TaggerWarmupDuration

func TaggerWarmupDuration(coreConfig pkgconfigmodel.Reader) time.Duration

TaggerWarmupDuration is used to configure the tag providers

func ValidateFingerprintConfig added in v0.71.0

func ValidateFingerprintConfig(config *types.FingerprintConfig) error

ValidateFingerprintConfig validates the fingerprint config and returns an error if the config is invalid

func ValidateProcessingRules

func ValidateProcessingRules(rules []*ProcessingRule) error

ValidateProcessingRules validates the rules and raises an error if one is misconfigured. Each processing rule must have: - a valid name - a valid type - a valid pattern that compiles

Types

type AutoMultilineSample added in v0.68.0

type AutoMultilineSample struct {
	// Sample is a raw log message sample used to aggregate logs.
	Sample string `mapstructure:"sample" json:"sample" yaml:"sample"`
	// MatchThreshold is the ratio of tokens that must match between the sample and the log message to consider it a match.
	// From a user perspective, this is how similar the log has to be to the sample to be considered a match.
	// Optional - Default value is 0.75.
	MatchThreshold *float64 `mapstructure:"match_threshold,omitempty" json:"match_threshold,omitempty"`
	// Regex is a pattern used to aggregate logs. NOTE that you can use either a sample or a regex, but not both.
	Regex string `mapstructure:"regex,omitempty" json:"regex,omitempty"`
	// Label is the label to apply to the log message if it matches the sample.
	// Optional - Default value is "start_group".
	Label *string `mapstructure:"label,omitempty" json:"label,omitempty"`
}

AutoMultilineSample defines a sample used to create auto multiline detection rules

type ChannelMessage

type ChannelMessage struct {
	Content []byte
	// Optional. Must be UTC. If not provided, time.Now().UTC() will be used
	// Used in the Serverless Agent
	Timestamp time.Time
	IsError   bool
}

ChannelMessage represents a log line sent to datadog, with its metadata

type EPIntakeVersion

type EPIntakeVersion uint8

EPIntakeVersion is the events platform intake API version

const (

	// EPIntakeVersion1 is version 1 of the events platform intake API
	EPIntakeVersion1 EPIntakeVersion
	// EPIntakeVersion2 is version 2 of the events platform intake API
	EPIntakeVersion2
)

type Endpoint

type Endpoint struct {
	Host                    string `mapstructure:"host" json:"host"`
	Port                    int
	PathPrefix              string `mapstructure:"path_prefix" json:"path_prefix"`
	UseCompression          bool   `mapstructure:"use_compression" json:"use_compression"`
	CompressionKind         string `mapstructure:"compression_kind" json:"compression_kind"`
	CompressionLevel        int    `mapstructure:"compression_level" json:"compression_level"`
	ProxyAddress            string
	IsMRF                   bool `mapstructure:"-" json:"-"`
	ConnectionResetInterval time.Duration

	BackoffFactor    float64
	BackoffBase      float64
	BackoffMax       float64
	RecoveryInterval int
	RecoveryReset    bool

	Version   EPIntakeVersion
	TrackType IntakeTrackType
	Protocol  IntakeProtocol
	Origin    IntakeOrigin
	// contains filtered or unexported fields
}

Endpoint holds all the organization and network parameters to send logs to Datadog.

func NewEndpoint added in v0.53.0

func NewEndpoint(apiKey string, apiKeyConfigPath string, host string, port int, pathPrefix string, useSSL bool) Endpoint

NewEndpoint returns a new Endpoint with the minimal field initialized.

func NewMockEndpoint added in v0.66.0

func NewMockEndpoint() Endpoint

NewMockEndpoint creates a new reliable endpoint with default test values

func NewMockEndpointWithOptions added in v0.66.0

func NewMockEndpointWithOptions(opts map[string]interface{}) Endpoint

NewMockEndpointWithOptions creates a new reliable endpoint with customizable options

func (*Endpoint) GetAPIKey added in v0.53.0

func (e *Endpoint) GetAPIKey() string

GetAPIKey returns the latest API Key for the Endpoint, including when the configuration gets updated at runtime

func (*Endpoint) GetStatus

func (e *Endpoint) GetStatus(prefix string, useHTTP bool) string

GetStatus returns the endpoint status

func (*Endpoint) IsReliable

func (e *Endpoint) IsReliable() bool

IsReliable returns true if the endpoint is reliable. Endpoints are reliable by default.

func (*Endpoint) UseSSL

func (e *Endpoint) UseSSL() bool

UseSSL returns the useSSL config setting

type EndpointCompressionOptions added in v0.67.0

type EndpointCompressionOptions struct {
	CompressionKind  string
	CompressionLevel int
}

EndpointCompressionOptions is the compression options for the endpoint

type Endpoints

type Endpoints struct {
	Main                   Endpoint
	Endpoints              []Endpoint
	UseProto               bool
	UseHTTP                bool
	BatchWait              time.Duration
	BatchMaxConcurrentSend int
	BatchMaxSize           int
	BatchMaxContentSize    int
	InputChanSize          int
}

Endpoints holds the main endpoint and additional ones to dualship logs.

func BuildEndpoints

func BuildEndpoints(coreConfig pkgconfigmodel.Reader, httpConnectivity HTTPConnectivity, intakeTrackType IntakeTrackType, intakeProtocol IntakeProtocol, intakeOrigin IntakeOrigin) (*Endpoints, error)

BuildEndpoints returns the endpoints to send logs.

func BuildEndpointsWithConfig

func BuildEndpointsWithConfig(coreConfig pkgconfigmodel.Reader, logsConfig *LogsConfigKeys, endpointPrefix string, httpConnectivity HTTPConnectivity, intakeTrackType IntakeTrackType, intakeProtocol IntakeProtocol, intakeOrigin IntakeOrigin) (*Endpoints, error)

BuildEndpointsWithConfig returns the endpoints to send logs.

func BuildEndpointsWithVectorOverride

func BuildEndpointsWithVectorOverride(coreConfig pkgconfigmodel.Reader, httpConnectivity HTTPConnectivity, intakeTrackType IntakeTrackType, intakeProtocol IntakeProtocol, intakeOrigin IntakeOrigin) (*Endpoints, error)

BuildEndpointsWithVectorOverride returns the endpoints to send logs and enforce Vector override config keys

func BuildHTTPEndpoints

func BuildHTTPEndpoints(coreConfig pkgconfigmodel.Reader, intakeTrackType IntakeTrackType, intakeProtocol IntakeProtocol, intakeOrigin IntakeOrigin) (*Endpoints, error)

BuildHTTPEndpoints returns the HTTP endpoints to send logs to.

func BuildHTTPEndpointsWithCompressionOverride added in v0.67.0

func BuildHTTPEndpointsWithCompressionOverride(coreConfig pkgconfigmodel.Reader, logsConfig *LogsConfigKeys, endpointPrefix string, intakeTrackType IntakeTrackType, intakeProtocol IntakeProtocol, intakeOrigin IntakeOrigin, compressionOptions EndpointCompressionOptions) (*Endpoints, error)

BuildHTTPEndpointsWithCompressionOverride returns the HTTP endpoints to send logs to with compression options.

func BuildHTTPEndpointsWithConfig

func BuildHTTPEndpointsWithConfig(coreConfig pkgconfigmodel.Reader, logsConfig *LogsConfigKeys, endpointPrefix string, intakeTrackType IntakeTrackType, intakeProtocol IntakeProtocol, intakeOrigin IntakeOrigin) (*Endpoints, error)

BuildHTTPEndpointsWithConfig returns the HTTP endpoints to send logs to.

func BuildHTTPEndpointsWithVectorOverride

func BuildHTTPEndpointsWithVectorOverride(coreConfig pkgconfigmodel.Reader, intakeTrackType IntakeTrackType, intakeProtocol IntakeProtocol, intakeOrigin IntakeOrigin) (*Endpoints, error)

BuildHTTPEndpointsWithVectorOverride returns the HTTP endpoints to send logs to.

func BuildServerlessEndpoints

func BuildServerlessEndpoints(coreConfig pkgconfigmodel.Reader, intakeTrackType IntakeTrackType, intakeProtocol IntakeProtocol) (*Endpoints, error)

BuildServerlessEndpoints returns the endpoints to send logs for the Serverless agent.

func NewEndpoints

func NewEndpoints(main Endpoint, additionalEndpoints []Endpoint, useProto bool, useHTTP bool) *Endpoints

NewEndpoints returns a new endpoints composite with default batching settings

func NewEndpointsWithBatchSettings

func NewEndpointsWithBatchSettings(main Endpoint, additionalEndpoints []Endpoint, useProto bool, useHTTP bool, batchWait time.Duration, batchMaxConcurrentSend int, batchMaxSize int, batchMaxContentSize int, inputChanSize int) *Endpoints

NewEndpointsWithBatchSettings returns a new endpoints composite with non-default batching settings specified

func NewMockEndpoints added in v0.66.0

func NewMockEndpoints(endpoints []Endpoint) *Endpoints

NewMockEndpoints creates a new Endpoints struct with a single reliable endpoint

func NewMockEndpointsWithOptions added in v0.66.0

func NewMockEndpointsWithOptions(endpointArray []Endpoint, opts map[string]interface{}) *Endpoints

NewMockEndpointsWithOptions creates a new Endpoints struct with customizable individual endpoints and options

func (*Endpoints) GetReliableEndpoints

func (e *Endpoints) GetReliableEndpoints() []Endpoint

GetReliableEndpoints returns additional endpoints that can be failed over to; in the event of an outage they block the pipeline and will retry errors. These endpoints are treated the same as the main endpoint.

func (*Endpoints) GetStatus

func (e *Endpoints) GetStatus() []string

GetStatus returns the endpoints status, one line per endpoint

func (*Endpoints) GetUnReliableEndpoints

func (e *Endpoints) GetUnReliableEndpoints() []Endpoint

GetUnReliableEndpoints returns additional endpoints that do not guarantee logs are received in the event of an error.

type HTTPConnectivity

type HTTPConnectivity bool

HTTPConnectivity is the status of the HTTP connectivity

var (
	// HTTPConnectivitySuccess is the status for successful HTTP connectivity
	HTTPConnectivitySuccess HTTPConnectivity = true
	// HTTPConnectivityFailure is the status for failed HTTP connectivity
	HTTPConnectivityFailure HTTPConnectivity = false
)

type IntakeOrigin

type IntakeOrigin string

IntakeOrigin indicates the log source to use for an endpoint intake.

const DDOTIntakeOrigin IntakeOrigin = "ddot"

DDOTIntakeOrigin is the DDOT Collector origin

const DefaultIntakeOrigin IntakeOrigin = "agent"

DefaultIntakeOrigin indicates that no special DD_SOURCE header is in use for the endpoint intake track type.

const OTelCollectorIntakeOrigin IntakeOrigin = "otel-collector"

OTelCollectorIntakeOrigin is the OSS OTel Collector origin

const ServerlessIntakeOrigin IntakeOrigin = "serverless"

ServerlessIntakeOrigin indicates that data was sent by serverless

type IntakeProtocol

type IntakeProtocol string

IntakeProtocol indicates the protocol to use for an endpoint intake.

const DefaultIntakeProtocol IntakeProtocol = ""

DefaultIntakeProtocol indicates that no special protocol is in use for the endpoint intake track type.

type IntakeTrackType

type IntakeTrackType string

IntakeTrackType indicates the type of an endpoint intake.

type LogsConfig

type LogsConfig struct {
	Type string

	IntegrationName string

	Port        int    // Network
	IdleTimeout string `mapstructure:"idle_timeout" json:"idle_timeout" yaml:"idle_timeout"` // Network
	Path        string // File, Journald

	Encoding     string           `mapstructure:"encoding" json:"encoding" yaml:"encoding"`                   // File
	ExcludePaths StringSliceField `mapstructure:"exclude_paths" json:"exclude_paths" yaml:"exclude_paths"`    // File
	TailingMode  string           `mapstructure:"start_position" json:"start_position" yaml:"start_position"` // File

	ConfigID           string           `mapstructure:"config_id" json:"config_id" yaml:"config_id"`                            // Journald
	IncludeSystemUnits StringSliceField `mapstructure:"include_units" json:"include_units" yaml:"include_units"`                // Journald
	ExcludeSystemUnits StringSliceField `mapstructure:"exclude_units" json:"exclude_units" yaml:"exclude_units"`                // Journald
	IncludeUserUnits   StringSliceField `mapstructure:"include_user_units" json:"include_user_units" yaml:"include_user_units"` // Journald
	ExcludeUserUnits   StringSliceField `mapstructure:"exclude_user_units" json:"exclude_user_units" yaml:"exclude_user_units"` // Journald
	IncludeMatches     StringSliceField `mapstructure:"include_matches" json:"include_matches" yaml:"include_matches"`          // Journald
	ExcludeMatches     StringSliceField `mapstructure:"exclude_matches" json:"exclude_matches" yaml:"exclude_matches"`          // Journald
	ContainerMode      bool             `mapstructure:"container_mode" json:"container_mode" yaml:"container_mode"`             // Journald

	Image string // Docker
	Label string // Docker
	// Name contains the container name
	Name string // Docker
	// Identifier contains the container ID.  This is also set for File sources and used to
	// determine the appropriate tags for the logs.
	Identifier string // Docker, File

	ChannelPath string `mapstructure:"channel_path" json:"channel_path" yaml:"channel_path"` // Windows Event
	Query       string // Windows Event

	// used as input only by the Channel tailer.
	// could have been unidirectional but the tailer could not close it in this case.
	Channel chan *ChannelMessage

	// ChannelTags are the tags attached to messages on Channel; unlike Tags this can be
	// modified at runtime (as long as ChannelTagsMutex is held).
	ChannelTags StringSliceField

	// ChannelTagsMutex guards ChannelTags.
	ChannelTagsMutex sync.Mutex

	Service         string
	Source          string
	SourceCategory  string
	Tags            StringSliceField
	ProcessingRules []*ProcessingRule `mapstructure:"log_processing_rules" json:"log_processing_rules" yaml:"log_processing_rules"`
	// ProcessRawMessage is used to process the raw message instead of only the content part of the message.
	ProcessRawMessage *bool `mapstructure:"process_raw_message" json:"process_raw_message" yaml:"process_raw_message"`

	AutoMultiLine               *bool   `mapstructure:"auto_multi_line_detection" json:"auto_multi_line_detection" yaml:"auto_multi_line_detection"`
	AutoMultiLineSampleSize     int     `mapstructure:"auto_multi_line_sample_size" json:"auto_multi_line_sample_size" yaml:"auto_multi_line_sample_size"`
	AutoMultiLineMatchThreshold float64 `mapstructure:"auto_multi_line_match_threshold" json:"auto_multi_line_match_threshold" yaml:"auto_multi_line_match_threshold"`
	// AutoMultiLineOptions provides detailed configuration for auto multi-line detection specific to this source.
	// It maps to the 'auto_multi_line' key in the YAML configuration.
	AutoMultiLineOptions *SourceAutoMultiLineOptions `mapstructure:"auto_multi_line" json:"auto_multi_line" yaml:"auto_multi_line"`
	// CustomSamples holds the raw string content of the 'auto_multi_line_detection_custom_samples' YAML block.
	// Downstream code will be responsible for parsing this string.
	AutoMultiLineSamples []*AutoMultilineSample   `` /* 151-byte string literal not displayed */
	FingerprintConfig    *types.FingerprintConfig `mapstructure:"fingerprint_config" json:"fingerprint_config" yaml:"fingerprint_config"`

	// IntegrationSource is the source of the integration file that contains this source.
	IntegrationSource string `mapstructure:"integration_source" json:"integration_source" yaml:"integration_source"`
	// IntegrationFileIndex is the index of the integration file that contains this source.
	IntegrationSourceIndex int `mapstructure:"integration_source_index" json:"integration_source_index" yaml:"integration_source_index"`
}

LogsConfig represents a log source config, which can be for instance a file to tail or a port to listen to.

func ParseJSON

func ParseJSON(data []byte) ([]*LogsConfig, error)

ParseJSON parses the data formatted in JSON returns an error if the parsing failed.

func ParseJSONOrYAML added in v0.69.0

func ParseJSONOrYAML(data []byte) ([]*LogsConfig, error)

ParseJSONOrYAML parses the data, trying JSON first, then falling back to YAML.

func ParseYAML

func ParseYAML(data []byte) ([]*LogsConfig, error)

ParseYAML parses the data formatted in YAML, returns an error if the parsing failed.

func (*LogsConfig) AutoMultiLineEnabled

func (c *LogsConfig) AutoMultiLineEnabled(coreConfig pkgconfigmodel.Reader) bool

AutoMultiLineEnabled determines whether auto multi line detection is enabled for this config, considering both the agent-wide logs_config.auto_multi_line_detection and any config for this particular log source.

func (*LogsConfig) Dump

func (c *LogsConfig) Dump(multiline bool) string

Dump dumps the contents of this struct to a string, for debugging purposes.

func (*LogsConfig) LegacyAutoMultiLineEnabled added in v0.65.0

func (c *LogsConfig) LegacyAutoMultiLineEnabled(coreConfig pkgconfigmodel.Reader) bool

LegacyAutoMultiLineEnabled determines whether the agent has fallen back to legacy auto multi line detection for compatibility reasons.

func (*LogsConfig) PublicJSON

func (c *LogsConfig) PublicJSON() ([]byte, error)

PublicJSON serializes the structure to make sure we only export fields that can be relevant to customers. This is used to send the logs config to the backend as part of the metadata payload.

func (*LogsConfig) ShouldProcessRawMessage

func (c *LogsConfig) ShouldProcessRawMessage() bool

ShouldProcessRawMessage returns if the raw message should be processed instead of only the message content. This is tightly linked to how messages are transmitted through the pipeline. If returning true, tailers using structured message (journald, windowsevents) will fall back to original behavior of sending the whole message (e.g. JSON for journald) for post-processing. Otherwise, the message content is extracted from the structured message and only this part is post-processed and sent to the intake.

func (*LogsConfig) Validate

func (c *LogsConfig) Validate() error

Validate returns an error if the config is misconfigured

type LogsConfigKeys

type LogsConfigKeys struct {
	// contains filtered or unexported fields
}

LogsConfigKeys stores logs configuration keys stored in YAML configuration files

func NewLogsConfigKeys

func NewLogsConfigKeys(configPrefix string, config pkgconfigmodel.Reader) *LogsConfigKeys

NewLogsConfigKeys returns a new logs configuration keys set

func NewLogsConfigKeysWithVector

func NewLogsConfigKeysWithVector(configPrefix, vectorPrefix string, config pkgconfigmodel.Reader) *LogsConfigKeys

NewLogsConfigKeysWithVector returns a new logs configuration keys set with vector config keys enabled

type Messages

type Messages struct {
	// contains filtered or unexported fields
}

Messages holds messages and warnings that can be displayed in the status. Warnings are displayed at the top of the log section in the status, and messages are displayed in the log source that generated the message.

func NewMessages

func NewMessages() *Messages

NewMessages initializes Messages with the default values.

func (*Messages) AddMessage

func (m *Messages) AddMessage(key string, message string)

AddMessage creates a message.

func (*Messages) GetMessages

func (m *Messages) GetMessages() []string

GetMessages returns all the messages

func (*Messages) RemoveMessage

func (m *Messages) RemoveMessage(key string)

RemoveMessage removes a message

type ProcessingRule

type ProcessingRule struct {
	Type               string
	Name               string
	ReplacePlaceholder string `mapstructure:"replace_placeholder" json:"replace_placeholder" yaml:"replace_placeholder"`
	Pattern            string
	// TODO: should be moved out
	Regex       *regexp.Regexp
	Placeholder []byte
}

ProcessingRule defines an exclusion or a masking rule to be applied on log lines

func GlobalProcessingRules

func GlobalProcessingRules(coreConfig pkgconfigmodel.Reader) ([]*ProcessingRule, error)

GlobalProcessingRules returns the global processing rules to apply to all logs.

type SourceAutoMultiLineOptions added in v0.68.0

type SourceAutoMultiLineOptions struct {
	// EnableJSONDetection allows to enable or disable the detection of multi-line JSON logs for this source.
	EnableJSONDetection *bool `mapstructure:"enable_json_detection" json:"enable_json_detection" yaml:"enable_json_detection"`

	// EnableDatetimeDetection allows to enable or disable the detection of multi-lines based on leading datetime stamps for this source.
	EnableDatetimeDetection *bool `mapstructure:"enable_datetime_detection" json:"enable_datetime_detection" yaml:"enable_datetime_detection"`

	// MatchThreshold sets the similarity threshold to consider a pattern match for this source.
	TimestampDetectorMatchThreshold *float64 `` /* 133-byte string literal not displayed */

	// TokenizerMaxInputBytes sets the maximum number of bytes the tokenizer will read for this source.
	TokenizerMaxInputBytes *int `mapstructure:"tokenizer_max_input_bytes" json:"tokenizer_max_input_bytes" yaml:"tokenizer_max_input_bytes"`

	// PatternTableMaxSize sets the number of patterns auto multi line can use
	PatternTableMaxSize *int `mapstructure:"pattern_table_max_size" json:"pattern_table_max_size" yaml:"pattern_table_max_size"`

	// PatternTableMatchThreshold sets the threshold for pattern table match for this source.
	PatternTableMatchThreshold *float64 `mapstructure:"pattern_table_match_threshold" json:"pattern_table_match_threshold" yaml:"pattern_table_match_threshold"`

	// EnableJSONAggregation allows to enable or disable the aggregation of multi-line JSON logs for this source.
	EnableJSONAggregation *bool `mapstructure:"enable_json_aggregation" json:"enable_json_aggregation" yaml:"enable_json_aggregation"`

	// TagAggregatedJSON allows to enable or disable the tagging of aggregated JSON logs for this source.
	TagAggregatedJSON *bool `mapstructure:"tag_aggregated_json" json:"tag_aggregated_json" yaml:"tag_aggregated_json"`
}

SourceAutoMultiLineOptions defines per-source auto multi-line detection overrides. These settings allow for fine-grained control over auto multi-line detection for a specific log source, potentially overriding global configurations.

type StringSliceField added in v0.65.0

type StringSliceField []string

StringSliceField is a custom type for unmarshalling comma-separated string values or typical yaml fields into a slice of strings.

func (*StringSliceField) UnmarshalYAML added in v0.65.0

func (t *StringSliceField) UnmarshalYAML(unmarshal func(interface{}) error) error

UnmarshalYAML is a custom unmarshalling function needed for string array fields to split comma-separated values.

type TailingMode

type TailingMode uint8

TailingMode type

func TailingModeFromString

func TailingModeFromString(mode string) (TailingMode, bool)

TailingModeFromString parses a string and returns a corresponding tailing mode, default to End if not found

func (TailingMode) String

func (mode TailingMode) String() string

String returns the seelog string representation for a specified tailing mode. Returns "" for an invalid tailing mode.

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL