Documentation ¶
Overview ¶
Package v1 contains API Schema definitions for the logging v1 API group
+k8s:deepcopy-gen=package,register +groupName=logging.openshift.io
Package v1 contains API Schema definitions for the logging v1 API group +kubebuilder:object:generate=true +groupName=logging.openshift.io
Index ¶
- Constants
- Variables
- func IsFilterTypeName(s string) bool
- func IsInputTypeName(s string) bool
- func IsOutputTypeName(s string) bool
- func IsReservedOutputName(s string) bool
- func SetCondition(cs *status.Conditions, t status.ConditionType, s corev1.ConditionStatus, ...) bool
- type Application
- type Audit
- type AzureMonitor
- type Cloudwatch
- type ClusterConditions
- type ClusterLogForwarder
- type ClusterLogForwarderList
- type ClusterLogForwarderSpec
- func (in *ClusterLogForwarderSpec) DeepCopy() *ClusterLogForwarderSpec
- func (in *ClusterLogForwarderSpec) DeepCopyInto(out *ClusterLogForwarderSpec)
- func (spec *ClusterLogForwarderSpec) FilterMap() map[string]*FilterSpec
- func (spec *ClusterLogForwarderSpec) HasDefaultOutput() bool
- func (spec *ClusterLogForwarderSpec) InputMap() map[string]*InputSpec
- func (spec *ClusterLogForwarderSpec) OutputMap() map[string]*OutputSpec
- type ClusterLogForwarderStatus
- func (in *ClusterLogForwarderStatus) DeepCopy() *ClusterLogForwarderStatus
- func (in *ClusterLogForwarderStatus) DeepCopyInto(out *ClusterLogForwarderStatus)
- func (status ClusterLogForwarderStatus) GetReadyConditionMessages() []string
- func (status ClusterLogForwarderStatus) IsReady() bool
- func (status *ClusterLogForwarderStatus) Synchronize(newStatus *ClusterLogForwarderStatus) error
- type ClusterLogging
- type ClusterLoggingList
- type ClusterLoggingSpec
- type ClusterLoggingStatus
- type CollectionSpec
- type CollectionStatus
- type CollectorSpec
- type Condition
- type ConditionReason
- type ConditionType
- type Conditions
- type CurationSpec
- type CurationStatus
- type CurationType
- type CuratorSpec
- type CuratorStatus
- type DropCondition
- type DropTest
- type Elasticsearch
- type ElasticsearchClusterConditions
- type ElasticsearchRoleType
- type ElasticsearchSpec
- type ElasticsearchStatus
- type ElasticsearchStructuredSpec
- type EventCollectionSpec
- type EventCollectionStatus
- type EventCollectionType
- type FilterSpec
- type FilterTypeSpec
- type FluentdBufferSpec
- type FluentdCollectorStatus
- type FluentdForward
- type FluentdForwarderSpec
- type FluentdInFileSpec
- type FluentdNormalizerStatus
- type FluentdSizeUnit
- type FluentdTimeUnit
- type FlushModeType
- type ForwarderSpec
- type GoogleCloudLogging
- type HTTPReceiver
- type Http
- type Infrastructure
- type InputSpec
- type Kafka
- type KibanaSpec
- type KibanaStatus
- type KubeAPIAudit
- type LabelSelector
- type LimitSpec
- type LogCollectionSpec
- type LogCollectionStatus
- type LogCollectionType
- type LogGroupByType
- type LogStoreSpec
- type LogStoreStatus
- type LogStoreType
- type Loki
- type LokiStackStoreSpec
- type ManagementState
- type NamedConditions
- func (in NamedConditions) DeepCopy() NamedConditions
- func (in NamedConditions) DeepCopyInto(out *NamedConditions)
- func (nc NamedConditions) IsAllReady() bool
- func (nc NamedConditions) Set(name string, cond status.Condition) bool
- func (nc NamedConditions) SetCondition(name string, t status.ConditionType, s corev1.ConditionStatus, ...) bool
- func (nc NamedConditions) Synchronize(newNamedCondition NamedConditions) error
- type NamespaceContainerSpec
- type NormalizerStatus
- type NormalizerType
- type OCPConsoleSpec
- type OutputDefaults
- type OutputSecretSpec
- type OutputSpec
- type OutputTLSSpec
- type OutputTuningSpec
- type OutputTypeSpec
- type OverflowActionType
- type PipelineSpec
- type PodStateMap
- type PodStateType
- type ProxySpec
- type PruneFilterSpec
- type ReceiverSpec
- func (in *ReceiverSpec) DeepCopy() *ReceiverSpec
- func (in *ReceiverSpec) DeepCopyInto(out *ReceiverSpec)
- func (receiver *ReceiverSpec) GetHTTPFormat() (ret string)
- func (receiver *ReceiverSpec) GetHTTPPort() (ret int32)
- func (receiver *ReceiverSpec) GetSyslogPort() (ret int32)
- func (receiver *ReceiverSpec) IsAuditHttpReceiver() bool
- func (receiver *ReceiverSpec) IsHttpReceiver() bool
- func (receiver *ReceiverSpec) IsSyslogReceiver() bool
- type ReceiverTypeSpec
- type RetentionPoliciesSpec
- type RetentionPolicySpec
- type RetryTypeType
- type RouteMap
- type Routes
- type Splunk
- type Syslog
- type SyslogReceiver
- type VisualizationSpec
- type VisualizationStatus
- type VisualizationType
Constants ¶
const ( InputNameApplication = "application" // Non-infrastructure container logs. InputNameInfrastructure = "infrastructure" // Infrastructure containers and system logs. InputNameAudit = "audit" // System audit logs. InputNameReceiver = "receiver" // Receiver to receive logs from non-cluster sources. )
Reserved input names.
const ( OutputDeliveryModeAtLeastOnce = "AtLeastOnce" OutputDeliveryModeAtMostOnce = "AtMostOnce" )
const ( // Ready indicates the service is ready. // // Ready=True means the operands are running and providing some service. // See the Degraded condition to distinguish full service from partial service. // // Ready=False means the operands cannot provide any service, and // the operator cannot recover without some external change. Either // the spec is invalid, or there is some environmental problem that is // outside of the operator's control. // // Ready=Unknown means the operator is in transition. // ConditionReady status.ConditionType = "Ready" // Degraded indicates partial service is available. // // Degraded=True means the operands can fulfill some of the `spec`, but not all, // even when Ready=True. // // Degraded=False with Ready=True means the operands are providing full service. // // Degraded=Unknown means the operator is in transition. // ConditionDegraded status.ConditionType = "Degraded" ValidationCondition status.ConditionType = "Validation" )
const ( // Invalid spec is ill-formed in some way, or contains unknown references. ReasonInvalid status.ConditionReason = "Invalid" // MissingResources spec refers to resources that can't be located. ReasonMissingResource status.ConditionReason = "MissingResource" // Unused spec defines a valid object but it is never used. ReasonUnused status.ConditionReason = "Unused" // Connecting object is unready because a connection is in progress. ReasonConnecting status.ConditionReason = "Connecting" ValidationFailureReason status.ConditionReason = "ValidationFailure" )
const ( FilterKubeAPIAudit = "kubeAPIAudit" FilterDrop = "drop" FilterPrune = "prune" )
Filter type constants, must match JSON tags of FilterTypeSpec fields.
const ( ReceiverTypeHttp = "http" ReceiverTypeSyslog = "syslog" FormatKubeAPIAudit = "kubeAPIAudit" // Log events in k8s list format, e.g. API audit log events. )
Receiver type constants, must match JSON tags of ReceiverTypeSpec fields.
const ( // InfrastructureSourceNode are journald logs from the node InfrastructureSourceNode string = "node" // InfrastructureSourceContainer are container logs from workloads deployed // in any of the following namespaces: default, kube*, openshift* InfrastructureSourceContainer string = "container" )
const ( // AuditSourceKube are audit logs from kubernetes API servers AuditSourceKube string = "kubeAPI" // AuditSourceOpenShift are audit logs from OpenShift API servers AuditSourceOpenShift string = "openshiftAPI" // AuditSourceAuditd are audit logs from a node auditd service AuditSourceAuditd string = "auditd" // AuditSourceOVN are audit logs from an Open Virtual Network service AuditSourceOVN string = "ovn" )
const ( OutputTypeCloudwatch = "cloudwatch" OutputTypeElasticsearch = "elasticsearch" OutputTypeFluentdForward = "fluentdForward" OutputTypeSyslog = "syslog" OutputTypeKafka = "kafka" OutputTypeLoki = "loki" OutputTypeGoogleCloudLogging = "googleCloudLogging" OutputTypeSplunk = "splunk" OutputTypeHttp = "http" OutputTypeAzureMonitor = "azureMonitor" )
Output type constants, must match JSON tags of OutputTypeSpec fields.
const ClusterLogForwarderKind = "ClusterLogForwarder"
const DefaultESVersion = 6
DefaultESVersion is the version of ES deployed by default
const FirstESVersionWithoutType = 8
FirstESVersionWithoutType (e.g. v8) is the first version without types
const OutputNameDefault = "default"
OutputNameDefault is the Default log store output name and version
Variables ¶
var ( // GroupVersion is group version used to register these objects GroupVersion = schema.GroupVersion{Group: "logging.openshift.io", Version: "v1"} // SchemeBuilder is used to add go types to the GroupVersionKind scheme SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} // AddToScheme adds the types in this group-version to the given scheme. AddToScheme = SchemeBuilder.AddToScheme )
var AuditSources = sets.NewString(AuditSourceKube, AuditSourceOpenShift, AuditSourceAuditd, AuditSourceOVN)
var CondReady = Condition{Type: ConditionReady, Status: corev1.ConditionTrue}
var InfrastructureSources = sets.NewString(InfrastructureSourceNode, InfrastructureSourceContainer)
var ReservedInputNames = sets.NewString(InputNameApplication, InputNameInfrastructure, InputNameAudit)
Functions ¶
func IsFilterTypeName ¶
IsFilterTypeName returns true if capitalized is a known filter type name
func IsInputTypeName ¶
func IsOutputTypeName ¶
IsOutputTypeName returns true if capitalized is a known output type name
func IsReservedOutputName ¶
IsReservedOutputName returns true if s is a reserved output name.
func SetCondition ¶
func SetCondition(cs *status.Conditions, t status.ConditionType, s corev1.ConditionStatus, r status.ConditionReason, format string, args ...interface{}) bool
SetCondition returns true if the condition changed or is new.
Types ¶
type Application ¶
type Application struct { // Namespaces from which to collect application logs. // Only messages from these namespaces are collected. // If absent or empty, logs are collected from all namespaces. This field supports // globs (e.g. mynam*space, *mynamespace) // Deprecated: Use []NamespaceContainerSpec instead. // // +optional // +deprecated Namespaces []string `json:"namespaces,omitempty"` // Selector for logs from pods with matching labels. // Only messages from pods with these labels are collected. // If absent or empty, logs are collected regardless of labels. // // +optional Selector *LabelSelector `json:"selector,omitempty"` // Group limit applied to the aggregated log // flow to this input. The total log flow from this input // cannot exceed the limit. Unsupported // // +optional // +docgen:ignore //+operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:hidden"} GroupLimit *LimitSpec `json:"-"` //`json:"groupLimit,omitempty"` // Container limit applied to each container of the pod(s) selected // by this input. No container of pods selected by this input can // exceed this limit. This limit is applied per collector deployment. // // +optional ContainerLimit *LimitSpec `json:"containerLimit,omitempty"` // Includes is the set of namespaces and containers to include when collecting logs. // Note: infrastructure namespaces are still excluded for "*" values unless a qualifying glob pattern is specified. // // +optional Includes []NamespaceContainerSpec `json:"includes,omitempty"` // Excludes is the set of namespaces and containers to ignore when collecting logs. // Takes precedence over Includes option. // // +optional Excludes []NamespaceContainerSpec `json:"excludes,omitempty"` }
Application log selector. All conditions in the selector must be satisfied (logical AND) to select logs.
func (*Application) DeepCopy ¶
func (in *Application) DeepCopy() *Application
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Application.
func (*Application) DeepCopyInto ¶
func (in *Application) DeepCopyInto(out *Application)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type Audit ¶
type Audit struct { // Sources defines the list of audit sources to collect. // This field is optional and its exclusion results in the collection of all audit sources. Valid sources are: // kubeAPI, openshiftAPI, auditd, ovn // // +optional Sources []string `json:"sources,omitempty"` }
Audit enables audit logs. Filtering may be added in future.
func (*Audit) DeepCopy ¶
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Audit.
func (*Audit) DeepCopyInto ¶
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type AzureMonitor ¶
type AzureMonitor struct { //CustomerId the unique identifier for the Log Analytics workspace. //https://learn.microsoft.com/en-us/azure/azure-monitor/logs/data-collector-api?tabs=powershell#request-uri-parameters CustomerId string `json:"customerId,omitempty"` //LogType the record type of the data that is being submitted. //Can only contain letters, numbers, and underscores (_), and may not exceed 100 characters. //https://learn.microsoft.com/en-us/azure/azure-monitor/logs/data-collector-api?tabs=powershell#request-headers LogType string `json:"logType,omitempty"` //AzureResourceId the Resource ID of the Azure resource the data should be associated with. //https://learn.microsoft.com/en-us/azure/azure-monitor/logs/data-collector-api?tabs=powershell#request-headers // +optional AzureResourceId string `json:"azureResourceId,omitempty"` //Host alternative host for dedicated Azure regions. (for example for China region) //https://docs.azure.cn/en-us/articles/guidance/developerdifferences#check-endpoints-in-azure // +optional Host string `json:"host,omitempty"` }
func (*AzureMonitor) DeepCopy ¶
func (in *AzureMonitor) DeepCopy() *AzureMonitor
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureMonitor.
func (*AzureMonitor) DeepCopyInto ¶
func (in *AzureMonitor) DeepCopyInto(out *AzureMonitor)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type Cloudwatch ¶
type Cloudwatch struct { // +required Region string `json:"region,omitempty"` //GroupBy defines the strategy for grouping logstreams // +required //+kubebuilder:validation:Enum:=logType;namespaceName;namespaceUUID GroupBy LogGroupByType `json:"groupBy,omitempty"` //GroupPrefix Add this prefix to all group names. // Useful to avoid group name clashes if an AWS account is used for multiple clusters and // used verbatim (e.g. "" means no prefix) // The default prefix is cluster-name/log-type // +optional GroupPrefix *string `json:"groupPrefix,omitempty"` }
Cloudwatch provides configuration for the output type `cloudwatch`
Note: the cloudwatch output recognizes the following keys in the Secret:
`aws_secret_access_key`: AWS secret access key. `aws_access_key_id`: AWS access key ID.
Or for sts-enabled clusters `credentials` or `role_arn` key specifying a properly formatted role arn
func (*Cloudwatch) DeepCopy ¶
func (in *Cloudwatch) DeepCopy() *Cloudwatch
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cloudwatch.
func (*Cloudwatch) DeepCopyInto ¶
func (in *Cloudwatch) DeepCopyInto(out *Cloudwatch)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ClusterConditions ¶
type ClusterConditions []Condition
`operator-sdk generate crds` does not allow map-of-slice, must use a named type.
func (ClusterConditions) DeepCopy ¶
func (in ClusterConditions) DeepCopy() ClusterConditions
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConditions.
func (ClusterConditions) DeepCopyInto ¶
func (in ClusterConditions) DeepCopyInto(out *ClusterConditions)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ClusterLogForwarder ¶
type ClusterLogForwarder struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` // Specification of the desired behavior of ClusterLogForwarder Spec ClusterLogForwarderSpec `json:"spec,omitempty"` // Status of the ClusterLogForwarder Status ClusterLogForwarderStatus `json:"status,omitempty"` }
+kubebuilder:object:root=true +kubebuilder:subresource:status +kubebuilder:resource:categories=logging,shortName=clf ClusterLogForwarder is an API to configure forwarding logs.
You configure forwarding by specifying a list of `pipelines`, which forward from a set of named inputs to a set of named outputs.
There are built-in input names for common log categories, and you can define custom inputs to do additional filtering.
There is a built-in output name for the default openshift log store, but you can define your own outputs with a URL and other connection information to forward logs to other stores or processors, inside or outside the cluster.
For more details see the documentation on the API fields.
func (*ClusterLogForwarder) DeepCopy ¶
func (in *ClusterLogForwarder) DeepCopy() *ClusterLogForwarder
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterLogForwarder.
func (*ClusterLogForwarder) DeepCopyInto ¶
func (in *ClusterLogForwarder) DeepCopyInto(out *ClusterLogForwarder)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*ClusterLogForwarder) DeepCopyObject ¶
func (in *ClusterLogForwarder) DeepCopyObject() runtime.Object
DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
type ClusterLogForwarderList ¶
type ClusterLogForwarderList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` Items []ClusterLogForwarder `json:"items"` }
+kubebuilder:object:root=true ClusterLogForwarderList contains a list of ClusterLogForwarder
func (*ClusterLogForwarderList) DeepCopy ¶
func (in *ClusterLogForwarderList) DeepCopy() *ClusterLogForwarderList
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterLogForwarderList.
func (*ClusterLogForwarderList) DeepCopyInto ¶
func (in *ClusterLogForwarderList) DeepCopyInto(out *ClusterLogForwarderList)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*ClusterLogForwarderList) DeepCopyObject ¶
func (in *ClusterLogForwarderList) DeepCopyObject() runtime.Object
DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
type ClusterLogForwarderSpec ¶
type ClusterLogForwarderSpec struct { // Inputs are named filters for log messages to be forwarded. // // There are three built-in inputs named `application`, `infrastructure` and // `audit`. You don't need to define inputs here if those are sufficient for // your needs. See `inputRefs` for more. // // +optional //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Forwarder Inputs",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:forwarderInputs"} Inputs []InputSpec `json:"inputs,omitempty"` // Outputs are named destinations for log messages. // // There is a built-in output named `default` which forwards to the default // openshift log store. You can define outputs to forward to other stores or // log processors, inside or outside the cluster. // // +optional //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Forwarder Outputs",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:forwarderOutputs"} Outputs []OutputSpec `json:"outputs,omitempty"` // Filters are applied to log records passing through a pipeline. // There are different types of filter that can select and modify log records in different ways. // See [FilterTypeSpec] for a list of filter types. Filters []FilterSpec `json:"filters,omitempty"` // Pipelines forward the messages selected by a set of inputs to a set of outputs. // // +required //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Forwarder Pipelines",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:forwarderPipelines"} Pipelines []PipelineSpec `json:"pipelines,omitempty"` // ServiceAccountName is the serviceaccount associated with the clusterlogforwarder // // +optional ServiceAccountName string `json:"serviceAccountName,omitempty"` // DEPRECATED OutputDefaults specify forwarder config explicitly for the // default managed log store named 'default'. If there is a need to spec // the managed logstore, define an outputSpec like the following where the // managed fields (e.g. 
URL, Secret.Name) will be replaced with the required values: // spec: // - outputs: // - name: default // type: elasticsearch // elasticsearch: // structuredTypeKey: kubernetes.labels.myvalue // // +optional OutputDefaults *OutputDefaults `json:"outputDefaults,omitempty"` }
ClusterLogForwarderSpec defines how logs should be forwarded to remote targets.
func (*ClusterLogForwarderSpec) DeepCopy ¶
func (in *ClusterLogForwarderSpec) DeepCopy() *ClusterLogForwarderSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterLogForwarderSpec.
func (*ClusterLogForwarderSpec) DeepCopyInto ¶
func (in *ClusterLogForwarderSpec) DeepCopyInto(out *ClusterLogForwarderSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*ClusterLogForwarderSpec) FilterMap ¶
func (spec *ClusterLogForwarderSpec) FilterMap() map[string]*FilterSpec
FilterMap returns a map of filter names to FilterSpec.
func (*ClusterLogForwarderSpec) HasDefaultOutput ¶
func (spec *ClusterLogForwarderSpec) HasDefaultOutput() bool
True if spec has a default output.
func (*ClusterLogForwarderSpec) InputMap ¶
func (spec *ClusterLogForwarderSpec) InputMap() map[string]*InputSpec
InputMap returns a map of input names to InputSpec.
func (*ClusterLogForwarderSpec) OutputMap ¶
func (spec *ClusterLogForwarderSpec) OutputMap() map[string]*OutputSpec
OutputMap returns a map of names to outputs.
type ClusterLogForwarderStatus ¶
type ClusterLogForwarderStatus struct { // Conditions of the log forwarder. //+operator-sdk:csv:customresourcedefinitions:type=status,displayName="Forwarder Conditions",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:forwarderConditions"} Conditions status.Conditions `json:"conditions,omitempty"` // Inputs maps input name to condition of the input. //+operator-sdk:csv:customresourcedefinitions:type=status,displayName="Input Conditions",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:inputConditions"} Inputs NamedConditions `json:"inputs,omitempty"` // Outputs maps output name to condition of the output. //+operator-sdk:csv:customresourcedefinitions:type=status,displayName="Output Conditions",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:outputConditions"} Outputs NamedConditions `json:"outputs,omitempty"` // Filters maps filter name to condition of the filter. //+operator-sdk:csv:customresourcedefinitions:type=status,displayName="Filter Conditions",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:filterConditions"} Filters NamedConditions `json:"filters,omitempty"` // Pipelines maps pipeline name to condition of the pipeline. //+operator-sdk:csv:customresourcedefinitions:type=status,displayName="Pipeline Conditions",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:pipelineConditions"} Pipelines NamedConditions `json:"pipelines,omitempty"` }
ClusterLogForwarderStatus defines the observed state of ClusterLogForwarder
func (*ClusterLogForwarderStatus) DeepCopy ¶
func (in *ClusterLogForwarderStatus) DeepCopy() *ClusterLogForwarderStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterLogForwarderStatus.
func (*ClusterLogForwarderStatus) DeepCopyInto ¶
func (in *ClusterLogForwarderStatus) DeepCopyInto(out *ClusterLogForwarderStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (ClusterLogForwarderStatus) GetReadyConditionMessages ¶
func (status ClusterLogForwarderStatus) GetReadyConditionMessages() []string
Get all subordinate condition messages for conditions of type "Ready" with status False. A 'true' Ready condition with a message means there is some error with the pipeline, but it is still valid.
func (ClusterLogForwarderStatus) IsReady ¶
func (status ClusterLogForwarderStatus) IsReady() bool
IsReady returns true if all of the subordinate conditions are ready.
func (*ClusterLogForwarderStatus) Synchronize ¶
func (status *ClusterLogForwarderStatus) Synchronize(newStatus *ClusterLogForwarderStatus) error
Synchronize synchronizes the current Status with a new Status. This is not the same as simply replacing the Status: Conditions contain the LastTransitionTime field which is left unmodified by Synchronize for noops. Whereas all updates and additions shall use the current (= now) timestamp. In short, ignore any timestamp in newStatus, and for noops use the timestamp from old status or use time.Now() for updates and additions.
type ClusterLogging ¶
type ClusterLogging struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata metav1.ObjectMeta `json:"metadata,omitempty"` // Specification of the desired behavior of ClusterLogging Spec ClusterLoggingSpec `json:"spec,omitempty"` // Status defines the observed state of ClusterLogging Status ClusterLoggingStatus `json:"status,omitempty"` }
+k8s:openapi-gen=true +kubebuilder:subresource:status +kubebuilder:resource:categories=logging,shortName=cl +kubebuilder:printcolumn:name="Management State",JSONPath=".spec.managementState",type=string +kubebuilder:object:root=true +kubebuilder:subresource:status A Red Hat OpenShift Logging instance. ClusterLogging is the Schema for the clusterloggings API +operator-sdk:csv:customresourcedefinitions:displayName="Cluster Logging",resources={{Pod,v1},{Deployment,v1},{ReplicaSet,v1},{ConfigMap,v1},{Service,v1},{Route,v1},{CronJob,v1},{Role,v1},{RoleBinding,v1},{ServiceAccount,v1},{ServiceMonitor,v1},{persistentvolumeclaims,v1}}
func (*ClusterLogging) DeepCopy ¶
func (in *ClusterLogging) DeepCopy() *ClusterLogging
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterLogging.
func (*ClusterLogging) DeepCopyInto ¶
func (in *ClusterLogging) DeepCopyInto(out *ClusterLogging)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*ClusterLogging) DeepCopyObject ¶
func (in *ClusterLogging) DeepCopyObject() runtime.Object
DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
type ClusterLoggingList ¶
type ClusterLoggingList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` Items []ClusterLogging `json:"items"` }
+kubebuilder:object:root=true ClusterLoggingList contains a list of ClusterLogging
func (*ClusterLoggingList) DeepCopy ¶
func (in *ClusterLoggingList) DeepCopy() *ClusterLoggingList
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterLoggingList.
func (*ClusterLoggingList) DeepCopyInto ¶
func (in *ClusterLoggingList) DeepCopyInto(out *ClusterLoggingList)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*ClusterLoggingList) DeepCopyObject ¶
func (in *ClusterLoggingList) DeepCopyObject() runtime.Object
DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
type ClusterLoggingSpec ¶
type ClusterLoggingSpec struct { // Indicator if the resource is 'Managed' or 'Unmanaged' by the operator // // +kubebuilder:validation:Enum:=Managed;Unmanaged // +optional ManagementState ManagementState `json:"managementState,omitempty"` // Specification of the Visualization component for the cluster // // +nullable // +optional Visualization *VisualizationSpec `json:"visualization,omitempty"` // Specification of the Log Storage component for the cluster // // +nullable // +optional LogStore *LogStoreSpec `json:"logStore,omitempty"` // Specification of the Collection component for the cluster // // +nullable Collection *CollectionSpec `json:"collection,omitempty"` // Deprecated. Specification of the Curation component for the cluster // This component was specifically for use with Elasticsearch and was // replaced by index management spec // // +nullable // +optional // +deprecated //+operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:hidden"} Curation *CurationSpec `json:"curation,omitempty"` // Deprecated. Specification for Forwarder component for the cluster // See spec.collection.fluentd // // +nullable // +optional // +deprecated //+operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:hidden"} Forwarder *ForwarderSpec `json:"forwarder,omitempty"` }
ClusterLoggingSpec defines the desired state of ClusterLogging +k8s:openapi-gen=true
func (*ClusterLoggingSpec) DeepCopy ¶
func (in *ClusterLoggingSpec) DeepCopy() *ClusterLoggingSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterLoggingSpec.
func (*ClusterLoggingSpec) DeepCopyInto ¶
func (in *ClusterLoggingSpec) DeepCopyInto(out *ClusterLoggingSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ClusterLoggingStatus ¶
type ClusterLoggingStatus struct { // +optional Visualization VisualizationStatus `json:"visualization"` // +optional LogStore LogStoreStatus `json:"logStore"` // Deprecated. // +optional // +deprecated // +nullable Collection *CollectionStatus `json:"collection,omitempty"` // +optional // +deprecated Curation *CurationStatus `json:"curation,omitempty"` // +optional Conditions status.Conditions `json:"conditions,omitempty"` }
ClusterLoggingStatus defines the observed state of ClusterLogging +k8s:openapi-gen=true
func (*ClusterLoggingStatus) DeepCopy ¶
func (in *ClusterLoggingStatus) DeepCopy() *ClusterLoggingStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterLoggingStatus.
func (*ClusterLoggingStatus) DeepCopyInto ¶
func (in *ClusterLoggingStatus) DeepCopyInto(out *ClusterLoggingStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type CollectionSpec ¶
type CollectionSpec struct { // The type of Log Collection to configure // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Collector Implementation",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:select:fluentd","urn:alm:descriptor:com.tectonic.ui:select:vector"} // +kubebuilder:validation:Optional Type LogCollectionType `json:"type"` // Deprecated. Specification of Log Collection for the cluster // See spec.collection // +nullable // +optional // +deprecated // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:hidden"} Logs *LogCollectionSpec `json:"logs,omitempty"` // CollectorSpec is the common specification that applies to any collector // +nullable // +optional CollectorSpec `json:",inline"` // Fluentd represents the configuration for forwarders of type fluentd. // +nullable // +optional Fluentd *FluentdForwarderSpec `json:"fluentd,omitempty"` }
This is the struct that will contain information pertinent to Log and event collection
func (*CollectionSpec) DeepCopy ¶
func (in *CollectionSpec) DeepCopy() *CollectionSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CollectionSpec.
func (*CollectionSpec) DeepCopyInto ¶
func (in *CollectionSpec) DeepCopyInto(out *CollectionSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type CollectionStatus ¶
type CollectionStatus struct { // +optional Logs LogCollectionStatus `json:"logs,omitempty"` }
func (*CollectionStatus) DeepCopy ¶
func (in *CollectionStatus) DeepCopy() *CollectionStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CollectionStatus.
func (*CollectionStatus) DeepCopyInto ¶
func (in *CollectionStatus) DeepCopyInto(out *CollectionStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type CollectorSpec ¶
type CollectorSpec struct { // The resource requirements for the collector // +nullable // +optional //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Collector Resource Requirements",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:resourceRequirements"} Resources *v1.ResourceRequirements `json:"resources,omitempty"` // Define which Nodes the Pods are scheduled on. // +nullable // +optional //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Collector Node Selector",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:selector:core:v1:ConfigMap"} NodeSelector map[string]string `json:"nodeSelector,omitempty"` // Define the tolerations the Pods will accept // +nullable // +optional //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Collector Pod Tolerations",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:selector:core:v1:Toleration"} Tolerations []v1.Toleration `json:"tolerations,omitempty"` }
CollectorSpec is spec to define scheduling and resources for a collector
func (*CollectorSpec) DeepCopy ¶
func (in *CollectorSpec) DeepCopy() *CollectorSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CollectorSpec.
func (*CollectorSpec) DeepCopyInto ¶
func (in *CollectorSpec) DeepCopyInto(out *CollectorSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type Condition ¶
Aliases for convenience
func CondInvalid ¶
func CondNotReady ¶
func CondNotReady(r ConditionReason, format string, args ...interface{}) Condition
func NewCondition ¶
func NewCondition(t status.ConditionType, s corev1.ConditionStatus, r status.ConditionReason, format string, args ...interface{}) Condition
type ConditionReason ¶
type ConditionReason = status.ConditionReason
type ConditionType ¶
type ConditionType = status.ConditionType
const ( IncorrectCRName ConditionType = "IncorrectCRName" ContainerWaiting ConditionType = "ContainerWaiting" ContainerTerminated ConditionType = "ContainerTerminated" Unschedulable ConditionType = "Unschedulable" NodeStorage ConditionType = "NodeStorage" CollectorDeadEnd ConditionType = "CollectorDeadEnd" )
type Conditions ¶
type Conditions = status.Conditions
func NewConditions ¶
func NewConditions(c ...Condition) Conditions
type CurationSpec ¶
type CurationSpec struct { // The kind of curation to configure Type CurationType `json:"type"` // The specification of curation to configure CuratorSpec `json:"curator,omitempty"` }
This is the struct that will contain information pertinent to Log curation (Curator)
func (*CurationSpec) DeepCopy ¶
func (in *CurationSpec) DeepCopy() *CurationSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CurationSpec.
func (*CurationSpec) DeepCopyInto ¶
func (in *CurationSpec) DeepCopyInto(out *CurationSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type CurationStatus ¶
type CurationStatus struct { // +optional CuratorStatus []CuratorStatus `json:"curatorStatus,omitempty"` }
func (*CurationStatus) DeepCopy ¶
func (in *CurationStatus) DeepCopy() *CurationStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CurationStatus.
func (*CurationStatus) DeepCopyInto ¶
func (in *CurationStatus) DeepCopyInto(out *CurationStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type CuratorSpec ¶
type CuratorSpec struct { // The resource requirements for Curator // // +nullable // +optional Resources *v1.ResourceRequirements `json:"resources"` // Define which Nodes the Pods are scheduled on. // // +nullable NodeSelector map[string]string `json:"nodeSelector,omitempty"` Tolerations []v1.Toleration `json:"tolerations,omitempty"` // The cron schedule that the Curator job is run. Defaults to "30 3 * * *" Schedule string `json:"schedule"` }
func (*CuratorSpec) DeepCopy ¶
func (in *CuratorSpec) DeepCopy() *CuratorSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CuratorSpec.
func (*CuratorSpec) DeepCopyInto ¶
func (in *CuratorSpec) DeepCopyInto(out *CuratorSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type CuratorStatus ¶
type CuratorStatus struct { // +optional CronJob string `json:"cronJobs"` // +optional Schedule string `json:"schedules"` // +optional Suspended bool `json:"suspended"` // +optional Conditions map[string]ClusterConditions `json:"clusterCondition,omitempty"` }
func (*CuratorStatus) DeepCopy ¶
func (in *CuratorStatus) DeepCopy() *CuratorStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CuratorStatus.
func (*CuratorStatus) DeepCopyInto ¶
func (in *CuratorStatus) DeepCopyInto(out *CuratorStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type DropCondition ¶
type DropCondition struct { // A dot delimited path to a field in the log record. It must start with a `.`. // The path can contain alpha-numeric characters and underscores (a-zA-Z0-9_). // If segments contain characters outside of this range, the segment must be quoted. // Examples: `.kubernetes.namespace_name`, `.log_type`, '.kubernetes.labels.foobar', `.kubernetes.labels."foo-bar/baz"` // +optional Field string `json:"field,omitempty"` // A regular expression that the field will match. // If the value of the field defined in the DropTest matches the regular expression, the log record will be dropped. // Must define only one of matches OR notMatches // +optional Matches string `json:"matches,omitempty"` // A regular expression that the field does not match. // If the value of the field defined in the DropTest does not match the regular expression, the log record will be dropped. // Must define only one of matches or notMatches // +optional NotMatches string `json:"notMatches,omitempty"` }
func (*DropCondition) DeepCopy ¶
func (in *DropCondition) DeepCopy() *DropCondition
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DropCondition.
func (*DropCondition) DeepCopyInto ¶
func (in *DropCondition) DeepCopyInto(out *DropCondition)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type DropTest ¶
type DropTest struct { // DropConditions is an array of DropCondition which are conditions that are ANDed together // +optional DropConditions []DropCondition `json:"test,omitempty"` }
func (*DropTest) DeepCopy ¶
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DropTest.
func (*DropTest) DeepCopyInto ¶
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type Elasticsearch ¶
type Elasticsearch struct { ElasticsearchStructuredSpec `json:",inline"` // Version specifies the version of Elasticsearch to be used. // Must be one of: // - 6 - Default for internal ES store // - 7 // - 8 - Latest for external ES store // // +kubebuilder:validation:Minimum:=6 // +optional Version int `json:"version,omitempty"` }
func (*Elasticsearch) DeepCopy ¶
func (in *Elasticsearch) DeepCopy() *Elasticsearch
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Elasticsearch.
func (*Elasticsearch) DeepCopyInto ¶
func (in *Elasticsearch) DeepCopyInto(out *Elasticsearch)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ElasticsearchClusterConditions ¶
type ElasticsearchClusterConditions []elasticsearch.ClusterCondition
func (ElasticsearchClusterConditions) DeepCopy ¶
func (in ElasticsearchClusterConditions) DeepCopy() ElasticsearchClusterConditions
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchClusterConditions.
func (ElasticsearchClusterConditions) DeepCopyInto ¶
func (in ElasticsearchClusterConditions) DeepCopyInto(out *ElasticsearchClusterConditions)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ElasticsearchRoleType ¶
type ElasticsearchRoleType string
const ( ElasticsearchRoleTypeClient ElasticsearchRoleType = "client" ElasticsearchRoleTypeData ElasticsearchRoleType = "data" ElasticsearchRoleTypeMaster ElasticsearchRoleType = "master" )
type ElasticsearchSpec ¶
type ElasticsearchSpec struct { // The resource requirements for Elasticsearch // // +nullable // +optional //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Elasticsearch Resource Requirements",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:resourceRequirements"} Resources *v1.ResourceRequirements `json:"resources,omitempty"` // Number of nodes to deploy for Elasticsearch //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Elasticsearch Size",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:podCount"} NodeCount int32 `json:"nodeCount,omitempty"` // Define which Nodes the Pods are scheduled on. // // +nullable //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Elasticsearch Node Selector",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:nodeSelector"} NodeSelector map[string]string `json:"nodeSelector,omitempty"` Tolerations []v1.Toleration `json:"tolerations,omitempty"` // The storage specification for Elasticsearch data nodes // // +nullable // +optional Storage elasticsearch.ElasticsearchStorageSpec `json:"storage,omitempty"` // +optional RedundancyPolicy elasticsearch.RedundancyPolicyType `json:"redundancyPolicy,omitempty"` // Specification of the Elasticsearch Proxy component ProxySpec `json:"proxy,omitempty"` }
func (*ElasticsearchSpec) DeepCopy ¶
func (in *ElasticsearchSpec) DeepCopy() *ElasticsearchSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchSpec.
func (*ElasticsearchSpec) DeepCopyInto ¶
func (in *ElasticsearchSpec) DeepCopyInto(out *ElasticsearchSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ElasticsearchStatus ¶
type ElasticsearchStatus struct { // +optional ClusterName string `json:"clusterName"` // +optional NodeCount int32 `json:"nodeCount"` // +optional ReplicaSets []string `json:"replicaSets,omitempty"` // +optional Deployments []string `json:"deployments,omitempty"` // +optional StatefulSets []string `json:"statefulSets,omitempty"` // +optional ClusterHealth string `json:"clusterHealth,omitempty"` // +optional Cluster elasticsearch.ClusterHealth `json:"cluster"` // +optional Pods map[ElasticsearchRoleType]PodStateMap `json:"pods,omitempty"` // +optional ShardAllocationEnabled elasticsearch.ShardAllocationState `json:"shardAllocationEnabled"` // +optional ClusterConditions ElasticsearchClusterConditions `json:"clusterConditions,omitempty"` // +optional NodeConditions map[string]ElasticsearchClusterConditions `json:"nodeConditions,omitempty"` }
func (*ElasticsearchStatus) DeepCopy ¶
func (in *ElasticsearchStatus) DeepCopy() *ElasticsearchStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchStatus.
func (*ElasticsearchStatus) DeepCopyInto ¶
func (in *ElasticsearchStatus) DeepCopyInto(out *ElasticsearchStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ElasticsearchStructuredSpec ¶
type ElasticsearchStructuredSpec struct { // StructuredTypeKey specifies the metadata key to be used as name of elasticsearch index // It takes precedence over StructuredTypeName // // +optional StructuredTypeKey string `json:"structuredTypeKey,omitempty"` // StructuredTypeName specifies the name of elasticsearch schema // // +optional StructuredTypeName string `json:"structuredTypeName,omitempty"` // EnableStructuredContainerLogs enables multi-container structured logs to allow // forwarding logs from containers within a pod to separate indices. Annotating // the pod with key 'containerType.logging.openshift.io/<container-name>' and value // '<structure-type-name>' will forward those container logs to an alternate index // from that defined by the other 'structured' keys here // // +optional EnableStructuredContainerLogs bool `json:"enableStructuredContainerLogs,omitempty"` }
ElasticsearchStructuredSpec is spec related to structured log changes to determine the elasticsearch index
func (*ElasticsearchStructuredSpec) DeepCopy ¶
func (in *ElasticsearchStructuredSpec) DeepCopy() *ElasticsearchStructuredSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchStructuredSpec.
func (*ElasticsearchStructuredSpec) DeepCopyInto ¶
func (in *ElasticsearchStructuredSpec) DeepCopyInto(out *ElasticsearchStructuredSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type EventCollectionSpec ¶
type EventCollectionSpec struct {
Type EventCollectionType `json:"type"`
}
func (*EventCollectionSpec) DeepCopy ¶
func (in *EventCollectionSpec) DeepCopy() *EventCollectionSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventCollectionSpec.
func (*EventCollectionSpec) DeepCopyInto ¶
func (in *EventCollectionSpec) DeepCopyInto(out *EventCollectionSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type EventCollectionStatus ¶
type EventCollectionStatus struct { }
func (*EventCollectionStatus) DeepCopy ¶
func (in *EventCollectionStatus) DeepCopy() *EventCollectionStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventCollectionStatus.
func (*EventCollectionStatus) DeepCopyInto ¶
func (in *EventCollectionStatus) DeepCopyInto(out *EventCollectionStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type EventCollectionType ¶
type EventCollectionType string
type FilterSpec ¶
type FilterSpec struct { // Name used to refer to the filter from a `pipeline`. // // +kubebuilder:validation:minLength:=1 // +required Name string `json:"name"` // Type of filter. // // +kubebuilder:validation:Enum:=kubeAPIAudit;drop;prune // +required Type string `json:"type"` FilterTypeSpec `json:",inline"` }
Filter defines a filter for log messages. See FilterTypeSpec for a list of filter types.
func (*FilterSpec) DeepCopy ¶
func (in *FilterSpec) DeepCopy() *FilterSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterSpec.
func (*FilterSpec) DeepCopyInto ¶
func (in *FilterSpec) DeepCopyInto(out *FilterSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type FilterTypeSpec ¶
type FilterTypeSpec struct { // +optional KubeAPIAudit *KubeAPIAudit `json:"kubeAPIAudit,omitempty"` // A drop filter applies a sequence of tests to a log record and drops the record if any test passes. // Each test contains a sequence of conditions, all conditions must be true for the test to pass. // A DropTestsSpec contains an array of tests which contains an array of conditions // +optional DropTestsSpec *[]DropTest `json:"drop,omitempty"` // The PruneFilterSpec consists of two arrays, namely in and notIn, which dictate the fields to be pruned. // +optional PruneFilterSpec *PruneFilterSpec `json:"prune,omitempty"` }
FilterTypeSpec is a union of filter specification types. The fields of this struct define the set of known filter types.
func (*FilterTypeSpec) DeepCopy ¶
func (in *FilterTypeSpec) DeepCopy() *FilterTypeSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterTypeSpec.
func (*FilterTypeSpec) DeepCopyInto ¶
func (in *FilterTypeSpec) DeepCopyInto(out *FilterTypeSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type FluentdBufferSpec ¶
type FluentdBufferSpec struct { // ChunkLimitSize represents the maximum size of each chunk. Events will be // written into chunks until the size of chunks become this size. // // +optional ChunkLimitSize FluentdSizeUnit `json:"chunkLimitSize"` // TotalLimitSize represents the threshold of node space allowed per fluentd // buffer to allocate. Once this threshold is reached, all append operations // will fail with error (and data will be lost). // // +optional TotalLimitSize FluentdSizeUnit `json:"totalLimitSize"` // OverflowAction represents the action for the fluentd buffer plugin to // execute when a buffer queue is full. (Default: block) // // +kubebuilder:validation:Enum:=throw_exception;block;drop_oldest_chunk // +optional OverflowAction OverflowActionType `json:"overflowAction"` // FlushThreadCount represents the number of threads used by the fluentd buffer // plugin to flush/write chunks in parallel. // // +optional FlushThreadCount int32 `json:"flushThreadCount"` // FlushMode represents the mode of the flushing thread to write chunks. The mode // allows lazy (if `time` parameter set), per interval or immediate flushing. // // +kubebuilder:validation:Enum:=lazy;interval;immediate // +optional FlushMode FlushModeType `json:"flushMode"` // FlushInterval represents the time duration to wait between two consecutive flush // operations. Takes only effect used together with `flushMode: interval`. // // +optional FlushInterval FluentdTimeUnit `json:"flushInterval"` // RetryWait represents the time duration between two consecutive retries to flush // buffers for periodic retries or a constant factor of time on retries with exponential // backoff. // // +optional RetryWait FluentdTimeUnit `json:"retryWait"` // RetryType represents the type of retrying flush operations. Flush operations can // be retried either periodically or by applying exponential backoff. 
// // +kubebuilder:validation:Enum:=exponential_backoff;periodic // +optional RetryType RetryTypeType `json:"retryType"` // RetryMaxInterval represents the maximum time interval for exponential backoff // between retries. Takes only effect if used together with `retryType: exponential_backoff`. // // +optional RetryMaxInterval FluentdTimeUnit `json:"retryMaxInterval"` // RetryTimeout represents the maximum time interval to attempt retries before giving up // and the record is discarded. If unspecified, the default will be used // // +optional RetryTimeout FluentdTimeUnit `json:"retryTimeout"` }
FluentdBufferSpec represents a subset of fluentd buffer parameters to tune the buffer configuration for all fluentd outputs. It supports a subset of parameters to configure buffer and queue sizing, flush operations and retry flushing.
For general parameters refer to: https://docs.fluentd.org/configuration/buffer-section#buffering-parameters
For flush parameters refer to: https://docs.fluentd.org/configuration/buffer-section#flushing-parameters
For retry parameters refer to: https://docs.fluentd.org/configuration/buffer-section#retries-parameters
func (*FluentdBufferSpec) DeepCopy ¶
func (in *FluentdBufferSpec) DeepCopy() *FluentdBufferSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentdBufferSpec.
func (*FluentdBufferSpec) DeepCopyInto ¶
func (in *FluentdBufferSpec) DeepCopyInto(out *FluentdBufferSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type FluentdCollectorStatus ¶
type FluentdCollectorStatus struct { // +optional DaemonSet string `json:"daemonSet,omitempty"` // +optional Nodes map[string]string `json:"nodes,omitempty"` // +optional Pods PodStateMap `json:"pods,omitempty"` // +optional Conditions map[string]ClusterConditions `json:"clusterCondition,omitempty"` }
func (*FluentdCollectorStatus) DeepCopy ¶
func (in *FluentdCollectorStatus) DeepCopy() *FluentdCollectorStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentdCollectorStatus.
func (*FluentdCollectorStatus) DeepCopyInto ¶
func (in *FluentdCollectorStatus) DeepCopyInto(out *FluentdCollectorStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type FluentdForward ¶
type FluentdForward struct{}
FluentdForward does not provide additional fields, but note that the fluentforward output allows these additional keys in the Secret:
`shared_key`: (string) Key to enable fluent-forward shared-key authentication.
func (*FluentdForward) DeepCopy ¶
func (in *FluentdForward) DeepCopy() *FluentdForward
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentdForward.
func (*FluentdForward) DeepCopyInto ¶
func (in *FluentdForward) DeepCopyInto(out *FluentdForward)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type FluentdForwarderSpec ¶
type FluentdForwarderSpec struct { InFile *FluentdInFileSpec `json:"inFile,omitempty"` Buffer *FluentdBufferSpec `json:"buffer,omitempty"` }
FluentdForwarderSpec represents the configuration for forwarders of type fluentd.
func (*FluentdForwarderSpec) DeepCopy ¶
func (in *FluentdForwarderSpec) DeepCopy() *FluentdForwarderSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentdForwarderSpec.
func (*FluentdForwarderSpec) DeepCopyInto ¶
func (in *FluentdForwarderSpec) DeepCopyInto(out *FluentdForwarderSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type FluentdInFileSpec ¶
type FluentdInFileSpec struct { //ReadLinesLimit represents the number of lines to read with each I/O operation // +optional ReadLinesLimit int `json:"readLinesLimit"` }
FluentdInFileSpec represents a subset of fluentd in-tail plugin parameters to tune the configuration for all fluentd in-tail inputs.
For general parameters refer to: https://docs.fluentd.org/input/tail#parameters
func (*FluentdInFileSpec) DeepCopy ¶
func (in *FluentdInFileSpec) DeepCopy() *FluentdInFileSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentdInFileSpec.
func (*FluentdInFileSpec) DeepCopyInto ¶
func (in *FluentdInFileSpec) DeepCopyInto(out *FluentdInFileSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type FluentdNormalizerStatus ¶
type FluentdNormalizerStatus struct { // +optional Replicas int32 `json:"replicas"` // +optional ReplicaSets []string `json:"replicaSets"` // +optional Pods PodStateMap `json:"pods"` // +optional Conditions map[string]ClusterConditions `json:"clusterCondition,omitempty"` }
func (*FluentdNormalizerStatus) DeepCopy ¶
func (in *FluentdNormalizerStatus) DeepCopy() *FluentdNormalizerStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentdNormalizerStatus.
func (*FluentdNormalizerStatus) DeepCopyInto ¶
func (in *FluentdNormalizerStatus) DeepCopyInto(out *FluentdNormalizerStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type FluentdSizeUnit ¶
type FluentdSizeUnit string
FluentdSizeUnit represents fluentd's parameter type for memory sizes.
For datatype pattern see: https://docs.fluentd.org/configuration/config-file#supported-data-types-for-values
Notice: The OpenAPI validation pattern is an ECMA262 regular expression (See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#properties)
+kubebuilder:validation:Pattern:="^([0-9]+)([kmgtKMGT]{0,1})$"
type FluentdTimeUnit ¶
type FluentdTimeUnit string
FluentdTimeUnit represents fluentd's parameter type for time.
For data type pattern see: https://docs.fluentd.org/configuration/config-file#supported-data-types-for-values
Notice: The OpenAPI validation pattern is an ECMA262 regular expression (See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#properties) +kubebuilder:validation:Pattern:="^([0-9]+)([smhd]{0,1})$"
type FlushModeType ¶
type FlushModeType string
const ( // Flush one chunk per time key if time is specified as chunk key FlushModeLazy FlushModeType = "lazy" // Flush chunks per specified time via FlushInterval FlushModeInterval FlushModeType = "interval" // Flush immediately after events appended to chunks FlushModeImmediate FlushModeType = "immediate" )
type ForwarderSpec ¶
type ForwarderSpec struct {
Fluentd *FluentdForwarderSpec `json:"fluentd,omitempty"`
}
ForwarderSpec contains global tuning parameters for specific forwarder implementations. This field is not required for general use, it allows performance tuning by users familiar with the underlying forwarder technology. Currently supported: `fluentd`.
func (*ForwarderSpec) DeepCopy ¶
func (in *ForwarderSpec) DeepCopy() *ForwarderSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForwarderSpec.
func (*ForwarderSpec) DeepCopyInto ¶
func (in *ForwarderSpec) DeepCopyInto(out *ForwarderSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type GoogleCloudLogging ¶
type GoogleCloudLogging struct { // +optional BillingAccountID string `json:"billingAccountId,omitempty"` // +optional OrganizationID string `json:"organizationId,omitempty"` // +optional FolderID string `json:"folderId,omitempty"` // +optional ProjectID string `json:"projectId,omitempty"` //LogID is the log ID to which to publish logs. This identifies log stream. LogID string `json:"logId,omitempty"` }
GoogleCloudLogging provides configuration for sending logs to Google Cloud Logging. Exactly one of billingAccountID, organizationID, folderID, or projectID must be set.
func (*GoogleCloudLogging) DeepCopy ¶
func (in *GoogleCloudLogging) DeepCopy() *GoogleCloudLogging
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GoogleCloudLogging.
func (*GoogleCloudLogging) DeepCopyInto ¶
func (in *GoogleCloudLogging) DeepCopyInto(out *GoogleCloudLogging)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type HTTPReceiver ¶
type HTTPReceiver struct { // Port the Receiver listens on. It must be a value between 1024 and 65535 // +kubebuilder:default:=8443 // +kubebuilder:validation:Minimum:=1024 // +kubebuilder:validation:Maximum:=65535 // +optional Port int32 `json:"port"` // Format is the format of incoming log data. // // +kubebuilder:validation:Enum:=kubeAPIAudit // +required Format string `json:"format"` }
HTTPReceiver receives encoded logs at an HTTP endpoint.
func (*HTTPReceiver) DeepCopy ¶
func (in *HTTPReceiver) DeepCopy() *HTTPReceiver
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPReceiver.
func (*HTTPReceiver) DeepCopyInto ¶
func (in *HTTPReceiver) DeepCopyInto(out *HTTPReceiver)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type Http ¶
type Http struct { // Headers specify optional headers to be sent with the request // +optional Headers map[string]string `json:"headers,omitempty"` // Timeout specifies the Http request timeout in seconds. If not set, 10secs is used. // +optional Timeout int `json:"timeout,omitempty"` // Method specifies the Http method to be used for sending logs. If not set, 'POST' is used. // +kubebuilder:validation:Enum:=GET;HEAD;POST;PUT;DELETE;OPTIONS;TRACE;PATCH // +optional Method string `json:"method,omitempty"` // Schema enables configuration of the way log records are normalized. // // Supported models: viaq(default), opentelemetry // // Logs are converted to the Open Telemetry specification according to schema value // // +kubebuilder:validation:Enum:=opentelemetry;viaq // +kubebuilder:default:viaq // +optional // +operator-sdk:csv:customresourcedefinitions:xDescriptors={"urn:alm:descriptor:com.tectonic.ui:hidden"} Schema string `json:"-"` }
Http provides configuration for sending JSON-encoded logs to a generic HTTP endpoint.
func (*Http) DeepCopy ¶
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Http.
func (*Http) DeepCopyInto ¶
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type Infrastructure ¶
type Infrastructure struct { // Sources defines the list of infrastructure sources to collect. // This field is optional and omission results in the collection of all infrastructure sources. Valid sources are: // node, container // // +optional Sources []string `json:"sources,omitempty"` }
Infrastructure enables infrastructure logs. Filtering may be added in future. Sources of these logs: * container workloads deployed to namespaces: default, kube*, openshift* * journald logs from cluster nodes
func (*Infrastructure) DeepCopy ¶
func (in *Infrastructure) DeepCopy() *Infrastructure
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Infrastructure.
func (*Infrastructure) DeepCopyInto ¶
func (in *Infrastructure) DeepCopyInto(out *Infrastructure)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type InputSpec ¶
type InputSpec struct { // Name used to refer to the input of a `pipeline`. // // +kubebuilder:validation:minLength:=1 // +required Name string `json:"name"` // Application, if present, enables named set of `application` logs that // can specify a set of match criteria // // +optional Application *Application `json:"application,omitempty"` // Infrastructure, if present, enables `infrastructure` logs. // // +optional Infrastructure *Infrastructure `json:"infrastructure,omitempty"` // Audit, if present, enables `audit` logs. // // +optional Audit *Audit `json:"audit,omitempty"` // Receiver to receive logs from non-cluster sources. // +optional Receiver *ReceiverSpec `json:"receiver,omitempty"` }
InputSpec defines a selector of log messages for a given log type. The input is rejected if more than one of the following subfields is defined: application, infrastructure, audit, and receiver.
func (*InputSpec) DeepCopy ¶
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputSpec.
func (*InputSpec) DeepCopyInto ¶
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*InputSpec) GetMaxRecordsPerSecond ¶
type Kafka ¶
type Kafka struct { // Topic specifies the target topic to send logs to. // // +optional Topic string `json:"topic,omitempty"` // Brokers specifies the list of broker endpoints of a Kafka cluster. // The list represents only the initial set used by the collector's Kafka client for the // first connection only. The collector's Kafka client fetches constantly an updated list // from Kafka. These updates are not reconciled back to the collector configuration. // If none provided the target URL from the OutputSpec is used as fallback. // // +optional Brokers []string `json:"brokers,omitempty"` }
Kafka provides optional extra properties for `type: kafka`
func (*Kafka) DeepCopy ¶
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Kafka.
func (*Kafka) DeepCopyInto ¶
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type KibanaSpec ¶
type KibanaSpec struct { // The resource requirements for Kibana // // +nullable // +optional //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Kibana Resource Requirements",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:resourceRequirements"} Resources *v1.ResourceRequirements `json:"resources"` // Define which Nodes the Pods are scheduled on. // // +deprecated // +nullable //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Kibana Node Selector",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:nodeSelector"} NodeSelector map[string]string `json:"nodeSelector,omitempty"` // Define the tolerations the Pods will accept // // +deprecated // +nullable //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Kibana Tolerations",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:selector:core:v1:Toleration"} Tolerations []v1.Toleration `json:"tolerations,omitempty"` // Number of instances to deploy for a Kibana deployment // +optional //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Kibana Size",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:podCount"} Replicas *int32 `json:"replicas,omitempty"` // Specification of the Kibana Proxy component ProxySpec `json:"proxy,omitempty"` }
func (*KibanaSpec) DeepCopy ¶
func (in *KibanaSpec) DeepCopy() *KibanaSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KibanaSpec.
func (*KibanaSpec) DeepCopyInto ¶
func (in *KibanaSpec) DeepCopyInto(out *KibanaSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type KibanaStatus ¶
type KibanaStatus struct { // +optional Replicas int32 `json:"replicas"` // +optional Deployment string `json:"deployment"` // +optional ReplicaSets []string `json:"replicaSets"` // +optional Pods PodStateMap `json:"pods"` // +optional Conditions map[string]ClusterConditions `json:"clusterCondition,omitempty"` }
func (*KibanaStatus) DeepCopy ¶
func (in *KibanaStatus) DeepCopy() *KibanaStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KibanaStatus.
func (*KibanaStatus) DeepCopyInto ¶
func (in *KibanaStatus) DeepCopyInto(out *KibanaStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type KubeAPIAudit ¶
type KubeAPIAudit struct { // Rules specify the audit Level a request should be recorded at. // A request may match multiple rules, in which case the FIRST matching rule is used. // PolicyRules are strictly ordered. // // If Rules is empty or missing default rules apply, see [KubeAPIAudit] Rules []auditv1.PolicyRule `json:"rules,omitempty"` // OmitStages is a list of stages for which no events are created. // Note that this can also be specified per rule in which case the union of both are omitted. // +optional OmitStages []auditv1.Stage `json:"omitStages,omitempty"` // OmitResponseCodes is a list of HTTP status code for which no events are created. // If this field is missing or null, the default value used is [404, 409, 422, 429] // (NotFound, Conflict, UnprocessableEntity, TooManyRequests) // If it is the empty list [], then no status codes are omitted. // Otherwise this field should be a list of integer status codes to omit. // // +optional OmitResponseCodes *[]int `json:"omitResponseCodes,omitempty"` }
KubeAPIAudit filters Kube API server audit logs, as described in Kubernetes Auditing.
Policy Filtering ¶
Policy event rules are the same format as the Kube Audit Policy with some minor extensions. The extensions are described here, see the Kube Audit Policy for the standard rule behavior. Rules are checked in order, checking stops at the first matching rule.
An audit policy event contains meta-data describing who made the request. It can also include the full body of the API request, and the response that was sent. The `level` of an audit rule determines how much data is included in the event:
- None: the event is dropped.
- Metadata: Only the audit metadata is included, request and response bodies are removed.
- Request: Audit metadata and the request body are included, the response body is removed.
- RequestResponse: All data is included: metadata, request body and response body. Note the response body can be very large. For example, a command like `oc get -A pods` generates a response body containing the YAML description of every pod in the cluster.
Extensions ¶
The following features are extensions to the standard Kube Audit Policy
## Wildcards
Names of users, groups, namespaces, and resources can have a leading or trailing '*' character. For example, namespace 'openshift-*' matches 'openshift-apiserver' or 'openshift-authentication'. Resource '*/status' matches 'Pod/status' or 'Deployment/status'.
## Default Rules
Events that do not match any rule in the policy are filtered as follows: - User events (i.e. non-system and non-serviceaccount) are forwarded - Read-only system events (get/list/watch etc) are dropped - Service account write events that occur within the same namespace as the service account are dropped - All other events are forwarded, subject to any configured [rate limits][#rate-limiting]
If you want to disable these defaults, end your rules list with a rule that has only a `level` field. An empty rule matches any event, and prevents the defaults from taking effect.
## Omit Response Codes
You can drop events based on the HTTP status code in the response. See the OmitResponseCodes field.
func (*KubeAPIAudit) DeepCopy ¶
func (in *KubeAPIAudit) DeepCopy() *KubeAPIAudit
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIAudit.
func (*KubeAPIAudit) DeepCopyInto ¶
func (in *KubeAPIAudit) DeepCopyInto(out *KubeAPIAudit)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type LabelSelector ¶
type LabelSelector metav1.LabelSelector
LabelSelector is a label query over a set of resources.
func (*LabelSelector) DeepCopy ¶
func (in *LabelSelector) DeepCopy() *LabelSelector
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelSelector.
func (*LabelSelector) DeepCopyInto ¶
func (in *LabelSelector) DeepCopyInto(out *LabelSelector)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type LimitSpec ¶
type LimitSpec struct { // MaxRecordsPerSecond is the maximum number of log records // allowed per input/output in a pipeline // // +required MaxRecordsPerSecond int64 `json:"maxRecordsPerSecond,omitempty"` }
func (*LimitSpec) DeepCopy ¶
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitSpec.
func (*LimitSpec) DeepCopyInto ¶
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type LogCollectionSpec ¶
type LogCollectionSpec struct { // The type of Log Collection to configure Type LogCollectionType `json:"type"` // Specification of the Fluentd Log Collection component CollectorSpec `json:"fluentd,omitempty"` }
Specification of Log Collection for the cluster. See spec.collection +deprecated
func (*LogCollectionSpec) DeepCopy ¶
func (in *LogCollectionSpec) DeepCopy() *LogCollectionSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogCollectionSpec.
func (*LogCollectionSpec) DeepCopyInto ¶
func (in *LogCollectionSpec) DeepCopyInto(out *LogCollectionSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type LogCollectionStatus ¶
type LogCollectionStatus struct { // +optional FluentdStatus FluentdCollectorStatus `json:"fluentdStatus,omitempty"` }
func (*LogCollectionStatus) DeepCopy ¶
func (in *LogCollectionStatus) DeepCopy() *LogCollectionStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogCollectionStatus.
func (*LogCollectionStatus) DeepCopyInto ¶
func (in *LogCollectionStatus) DeepCopyInto(out *LogCollectionStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type LogCollectionType ¶
type LogCollectionType string
const ( LogCollectionTypeFluentd LogCollectionType = "fluentd" LogCollectionTypeVector LogCollectionType = "vector" )
func (LogCollectionType) IsSupportedCollector ¶
func (ct LogCollectionType) IsSupportedCollector() bool
type LogGroupByType ¶
type LogGroupByType string
LogGroupByType defines a fixed strategy type
const ( //LogGroupByLogType is the strategy to group logs by source(e.g. app, infra) LogGroupByLogType LogGroupByType = "logType" // LogGroupByNamespaceName is the strategy to use for grouping logs by namespace. Infrastructure and // audit logs are always grouped by "logType" LogGroupByNamespaceName LogGroupByType = "namespaceName" // LogGroupByNamespaceUUID is the strategy to use for grouping logs by namespace UUID. Infrastructure and // audit logs are always grouped by "logType" LogGroupByNamespaceUUID LogGroupByType = "namespaceUUID" )
type LogStoreSpec ¶
type LogStoreSpec struct { // The Type of Log Storage to configure. The operator currently supports either using ElasticSearch // managed by elasticsearch-operator or Loki managed by loki-operator (LokiStack) as a default log store. // // When using ElasticSearch as a log store this operator also manages the ElasticSearch deployment. // // When using LokiStack as a log store this operator does not manage the LokiStack, but only creates // configuration referencing an existing LokiStack deployment. The user is responsible for creating and // managing the LokiStack himself. // // +kubebuilder:validation:Enum=elasticsearch;lokistack // +kubebuilder:default:=lokistack Type LogStoreType `json:"type"` // Specification of the Elasticsearch Log Store component // +deprecated Elasticsearch *ElasticsearchSpec `json:"elasticsearch,omitempty"` // LokiStack contains information about which LokiStack to use for log storage if Type is set to LogStoreTypeLokiStack. // // The cluster-logging-operator does not create or manage the referenced LokiStack. LokiStack LokiStackStoreSpec `json:"lokistack,omitempty"` // Retention policy defines the maximum age for an Elasticsearch index after which it should be deleted // // +nullable // +optional // +deprecated RetentionPolicy *RetentionPoliciesSpec `json:"retentionPolicy,omitempty"` }
The LogStoreSpec contains information about how logs are stored.
func (*LogStoreSpec) DeepCopy ¶
func (in *LogStoreSpec) DeepCopy() *LogStoreSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogStoreSpec.
func (*LogStoreSpec) DeepCopyInto ¶
func (in *LogStoreSpec) DeepCopyInto(out *LogStoreSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type LogStoreStatus ¶
type LogStoreStatus struct { // +optional ElasticsearchStatus []ElasticsearchStatus `json:"elasticsearchStatus,omitempty"` }
func (*LogStoreStatus) DeepCopy ¶
func (in *LogStoreStatus) DeepCopy() *LogStoreStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogStoreStatus.
func (*LogStoreStatus) DeepCopyInto ¶
func (in *LogStoreStatus) DeepCopyInto(out *LogStoreStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type LogStoreType ¶
type LogStoreType string
const ( // NOTE: update the +kubebuilder:validation:Enum comment on LogStoreSpec.Type if you add values here. LogStoreTypeElasticsearch LogStoreType = "elasticsearch" LogStoreTypeLokiStack LogStoreType = "lokistack" )
type Loki ¶
type Loki struct { // TenantKey is a meta-data key field to use as the TenantID, // For example: 'TenantKey: kubernetes.namespace_name` will use the kubernetes // namespace as the tenant ID. // // +optional TenantKey string `json:"tenantKey,omitempty"` // LabelKeys is a list of log record keys that will be used as Loki labels with the corresponding log record value. // // If LabelKeys is not set, the default keys are `[log_type, kubernetes.namespace_name, kubernetes.pod_name, kubernetes_host]` // // Note: Loki label names must match the regular expression "[a-zA-Z_:][a-zA-Z0-9_:]*" // Log record keys may contain characters like "." and "/" that are not allowed in Loki labels. // Log record keys are translated to Loki labels by replacing any illegal characters with '_'. // For example the default log record keys translate to these Loki labels: `log_type`, `kubernetes_namespace_name`, `kubernetes_pod_name`, `kubernetes_host` // // Note: the set of labels should be small, Loki imposes limits on the size and number of labels allowed. // See https://grafana.com/docs/loki/latest/configuration/#limits_config for more. // Loki queries can also query based on any log record field (not just labels) using query filters. // // +optional LabelKeys []string `json:"labelKeys,omitempty"` }
Loki provides optional extra properties for `type: loki`
func (*Loki) DeepCopy ¶
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Loki.
func (*Loki) DeepCopyInto ¶
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type LokiStackStoreSpec ¶
type LokiStackStoreSpec struct { // Name of the LokiStack resource. // // +required Name string `json:"name"` }
LokiStackStoreSpec is used to set up cluster-logging to use a LokiStack as logging storage. It points to an existing LokiStack in the same namespace.
func (*LokiStackStoreSpec) DeepCopy ¶
func (in *LokiStackStoreSpec) DeepCopy() *LokiStackStoreSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LokiStackStoreSpec.
func (*LokiStackStoreSpec) DeepCopyInto ¶
func (in *LokiStackStoreSpec) DeepCopyInto(out *LokiStackStoreSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ManagementState ¶
type ManagementState string
const ( // Managed means that the operator is actively managing its resources and trying to keep the component active. // It will only upgrade the component if it is safe to do so ManagementStateManaged ManagementState = "Managed" // Unmanaged means that the operator will not take any action related to the component ManagementStateUnmanaged ManagementState = "Unmanaged" )
type NamedConditions ¶
type NamedConditions map[string]status.Conditions
func (NamedConditions) DeepCopy ¶
func (in NamedConditions) DeepCopy() NamedConditions
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedConditions.
func (NamedConditions) DeepCopyInto ¶
func (in NamedConditions) DeepCopyInto(out *NamedConditions)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (NamedConditions) IsAllReady ¶
func (nc NamedConditions) IsAllReady() bool
func (NamedConditions) SetCondition ¶
func (nc NamedConditions) SetCondition(name string, t status.ConditionType, s corev1.ConditionStatus, r status.ConditionReason, format string, args ...interface{}) bool
func (NamedConditions) Synchronize ¶
func (nc NamedConditions) Synchronize(newNamedCondition NamedConditions) error
Synchronize synchronizes the current NamedCondition with a new NamedCondition. This is not the same as simply replacing the NamedCondition: Conditions contain the LastTransitionTime field which is left unmodified by Synchronize for noops. Whereas all updates and additions shall use the current (= now) timestamp. In short, ignore any timestamp in newNamedCondition, and for noops use the timestamp from nc or use time.Now().
type NamespaceContainerSpec ¶
type NamespaceContainerSpec struct { // Namespace resources. Creates a combined file pattern together with Container resources. // Supports glob patterns and presumes "*" if omitted. // Note: infrastructure namespaces are still excluded for "*" values unless a qualifying glob pattern is specified. // // +optional Namespace string `json:"namespace,omitempty"` // Container resources. Creates a combined file pattern together with Namespace resources. // Supports glob patterns and presumes "*" if omitted. // // +optional Container string `json:"container,omitempty"` }
func (*NamespaceContainerSpec) DeepCopy ¶
func (in *NamespaceContainerSpec) DeepCopy() *NamespaceContainerSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceContainerSpec.
func (*NamespaceContainerSpec) DeepCopyInto ¶
func (in *NamespaceContainerSpec) DeepCopyInto(out *NamespaceContainerSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type NormalizerStatus ¶
type NormalizerStatus struct { // +optional FluentdStatus []FluentdNormalizerStatus `json:"fluentdStatus,omitempty"` }
func (*NormalizerStatus) DeepCopy ¶
func (in *NormalizerStatus) DeepCopy() *NormalizerStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NormalizerStatus.
func (*NormalizerStatus) DeepCopyInto ¶
func (in *NormalizerStatus) DeepCopyInto(out *NormalizerStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type NormalizerType ¶
type NormalizerType string
type OCPConsoleSpec ¶
type OCPConsoleSpec struct { // LogsLimit is the max number of entries returned for a query. // // +optional //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="OCP Console Log Limit",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:ocpConsoleLogLimit"} LogsLimit int `json:"logsLimit,omitempty"` // Timeout is the max duration before a query timeout // // +optional //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="OCP Console Query Timeout",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:ocpConsoleTimeout"} Timeout FluentdTimeUnit `json:"timeout,omitempty"` }
func (*OCPConsoleSpec) DeepCopy ¶
func (in *OCPConsoleSpec) DeepCopy() *OCPConsoleSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCPConsoleSpec.
func (*OCPConsoleSpec) DeepCopyInto ¶
func (in *OCPConsoleSpec) DeepCopyInto(out *OCPConsoleSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type OutputDefaults ¶
type OutputDefaults struct { // Elasticsearch OutputSpec default values // // Values specified here will be used as default values for Elasticsearch Output spec // // +kubebuilder:default:false // +optional Elasticsearch *ElasticsearchStructuredSpec `json:"elasticsearch,omitempty"` }
func (*OutputDefaults) DeepCopy ¶
func (in *OutputDefaults) DeepCopy() *OutputDefaults
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputDefaults.
func (*OutputDefaults) DeepCopyInto ¶
func (in *OutputDefaults) DeepCopyInto(out *OutputDefaults)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type OutputSecretSpec ¶
type OutputSecretSpec struct { // Name of a secret in the namespace configured for log forwarder secrets. // // +required Name string `json:"name"` }
OutputSecretSpec is a secret reference containing name only, no namespace.
func (*OutputSecretSpec) DeepCopy ¶
func (in *OutputSecretSpec) DeepCopy() *OutputSecretSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputSecretSpec.
func (*OutputSecretSpec) DeepCopyInto ¶
func (in *OutputSecretSpec) DeepCopyInto(out *OutputSecretSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type OutputSpec ¶
type OutputSpec struct { // Name used to refer to the output from a `pipeline`. // // +kubebuilder:validation:minLength:=1 // +required Name string `json:"name"` // Type of output plugin. // // +kubebuilder:validation:Enum:=syslog;fluentdForward;elasticsearch;kafka;cloudwatch;loki;googleCloudLogging;splunk;http;azureMonitor // +required Type string `json:"type"` // URL to send log records to. // // An absolute URL, with a scheme. Valid schemes depend on `type`. // Special schemes `tcp`, `tls`, `udp` and `udps` are used for types that // have no scheme of their own. For example, to send syslog records using secure UDP: // // { type: syslog, url: udps://syslog.example.com:1234 } // // Basic TLS is enabled if the URL scheme requires it (for example 'https' or 'tls'). // The 'username@password' part of `url` is ignored. // Any additional authentication material is in the `secret`. // See the `secret` field for more details. // // +kubebuilder:validation:Pattern:=`^$|[a-zA-z]+:\/\/.*` // +optional URL string `json:"url,omitempty"` OutputTypeSpec `json:",inline"` // TLS contains settings for controlling options on TLS client connections. TLS *OutputTLSSpec `json:"tls,omitempty"` // Secret for authentication. // // Names a secret in the same namespace as the ClusterLogForwarder. // Sensitive authentication information is stored in a separate Secret object. // A Secret is like a ConfigMap, where the keys are strings and the values are // base64-encoded binary data, for example TLS certificates. // // Common keys are described here. // Some output types support additional keys, documented with the output-specific configuration field. // All secret keys are optional, enable the security features you want by setting the relevant keys. // // Transport Layer Security (TLS) // // Using a TLS URL (`https://...` or `tls://...`) without any secret enables basic TLS: // client authenticates server using system default certificate authority. 
// // Additional TLS features are enabled by referencing a Secret with the following optional fields in its spec.data. // All data fields are base64 encoded. // // * `tls.crt`: A client certificate, for mutual authentication. Requires `tls.key`. // * `tls.key`: Private key to unlock the client certificate. Requires `tls.crt` // * `passphrase`: Passphrase to decode an encoded TLS private key. Requires tls.key. // * `ca-bundle.crt`: Custom CA to validate certificates. // // Username and Password // // * `username`: Authentication user name. Requires `password`. // * `password`: Authentication password. Requires `username`. // // Simple Authentication Security Layer (SASL) // // * `sasl.enable`: (boolean) Explicitly enable or disable SASL. // If missing, SASL is automatically enabled if any `sasl.*` keys are set. // * `sasl.mechanisms`: (array of string) List of allowed SASL mechanism names. // If missing or empty, the system defaults are used. // * `sasl.allow-insecure`: (boolean) Allow mechanisms that send clear-text passwords. // Default false. // // +optional Secret *OutputSecretSpec `json:"secret,omitempty"` // Limit imposes a limit in records-per-second on the total aggregate rate of logs forwarded // to this output from any given collector container. The total log flow from an individual collector // container to this output cannot exceed the limit. Generally, one collector is deployed per cluster node // Logs may be dropped to enforce the limit. Missing or 0 means no rate limit. // // +optional Limit *LimitSpec `json:"limit,omitempty"` // Tuning parameters for the output. Specifying these parameters will alter the characteristics // of log forwarder which may be different from its behavior without the tuning. // +optional Tuning *OutputTuningSpec `json:"tuning,omitempty"` }
Output defines a destination for log messages.
func (*OutputSpec) DeepCopy ¶
func (in *OutputSpec) DeepCopy() *OutputSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputSpec.
func (*OutputSpec) DeepCopyInto ¶
func (in *OutputSpec) DeepCopyInto(out *OutputSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*OutputSpec) GetMaxRecordsPerSecond ¶
func (output *OutputSpec) GetMaxRecordsPerSecond() int64
func (*OutputSpec) HasPolicy ¶
func (output *OutputSpec) HasPolicy() bool
HasPolicy returns whether the output spec has flow control policies defined in it.
type OutputTLSSpec ¶
type OutputTLSSpec struct { // If InsecureSkipVerify is true, then the TLS client will be configured to ignore errors with certificates. // // This option is *not* recommended for production configurations. InsecureSkipVerify bool `json:"insecureSkipVerify,omitempty"` // TLSSecurityProfile is the security profile to apply to the output connection TLSSecurityProfile *openshiftv1.TLSSecurityProfile `json:"securityProfile,omitempty"` }
OutputTLSSpec contains options for TLS connections that are agnostic to the output type.
func (*OutputTLSSpec) DeepCopy ¶
func (in *OutputTLSSpec) DeepCopy() *OutputTLSSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputTLSSpec.
func (*OutputTLSSpec) DeepCopyInto ¶
func (in *OutputTLSSpec) DeepCopyInto(out *OutputTLSSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type OutputTuningSpec ¶
type OutputTuningSpec struct { // Delivery mode for log forwarding. // // - AtLeastOnce (default): if the forwarder crashes or is re-started, any logs that were read before // the crash but not sent to their destination will be re-read and re-sent. Note it is possible // that some logs are duplicated in the event of a crash - log records are delivered at-least-once. // - AtMostOnce: The forwarder makes no effort to recover logs lost during a crash. This mode may give // better throughput, but could result in more log loss. // // +required // +kubebuilder:validation:Enum:=AtLeastOnce;AtMostOnce Delivery string `json:"delivery,omitempty"` // Compression causes data to be compressed before sending over the network. // It is an error if the compression type is not supported by the output. // // +optional // +kubebuilder:validation:Enum:=gzip;none;snappy;zlib;zstd;lz4 Compression string `json:"compression,omitempty"` // MaxWrite limits the maximum payload in terms of bytes of a single "send" to the output. // // +optional MaxWrite *resource.Quantity `json:"maxWrite,omitempty"` // MinRetryDuration is the minimum time to wait between attempts to retry after delivery a failure. // // +optional MinRetryDuration *time.Duration `json:"minRetryDuration,omitempty"` // MaxRetryDuration is the maximum time to wait between retry attempts after a delivery failure. // // +optional MaxRetryDuration *time.Duration `json:"maxRetryDuration,omitempty"` }
OutputTuningSpec tuning parameters for an output
func (*OutputTuningSpec) DeepCopy ¶
func (in *OutputTuningSpec) DeepCopy() *OutputTuningSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputTuningSpec.
func (*OutputTuningSpec) DeepCopyInto ¶
func (in *OutputTuningSpec) DeepCopyInto(out *OutputTuningSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type OutputTypeSpec ¶
type OutputTypeSpec struct { // +optional Syslog *Syslog `json:"syslog,omitempty"` // +optional FluentdForward *FluentdForward `json:"fluentdForward,omitempty"` // +optional Elasticsearch *Elasticsearch `json:"elasticsearch,omitempty"` // +optional Kafka *Kafka `json:"kafka,omitempty"` // +optional Cloudwatch *Cloudwatch `json:"cloudwatch,omitempty"` // +optional Loki *Loki `json:"loki,omitempty"` // +optional GoogleCloudLogging *GoogleCloudLogging `json:"googleCloudLogging,omitempty"` // +optional Splunk *Splunk `json:"splunk,omitempty"` // +optional Http *Http `json:"http,omitempty"` // +optional AzureMonitor *AzureMonitor `json:"azureMonitor,omitempty"` }
OutputTypeSpec is a union of optional additional configuration specific to an output type. The fields of this struct define the set of known output types.
func (*OutputTypeSpec) DeepCopy ¶
func (in *OutputTypeSpec) DeepCopy() *OutputTypeSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputTypeSpec.
func (*OutputTypeSpec) DeepCopyInto ¶
func (in *OutputTypeSpec) DeepCopyInto(out *OutputTypeSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type OverflowActionType ¶
type OverflowActionType string
const ( // ThrowExceptionAction raises an exception when output buffer is full ThrowExceptionAction OverflowActionType = "throw_exception" // BlockAction blocks processing inputs when output buffer is full BlockAction OverflowActionType = "block" // DropOldestChunkAction drops oldest chunk to accept newly incoming chunks // when buffer is full DropOldestChunkAction OverflowActionType = "drop_oldest_chunk" )
type PipelineSpec ¶
type PipelineSpec struct { // OutputRefs lists the names (`output.name`) of outputs from this pipeline. // // The following built-in names are always available: // // 'default' Output to the default log store provided by ClusterLogging. // // +required OutputRefs []string `json:"outputRefs"` // InputRefs lists the names (`input.name`) of inputs to this pipeline. // // The following built-in input names are always available: // // `application` selects all logs from application pods. // // `infrastructure` selects logs from openshift and kubernetes pods and some node logs. // // `audit` selects node logs related to security audits. // // +required InputRefs []string `json:"inputRefs"` // Filters lists the names of filters to be applied to records going through this pipeline. // // Each filter is applied in order. // If a filter drops a records, subsequent filters are not applied. // +optional FilterRefs []string `json:"filterRefs,omitempty"` // Labels applied to log records passing through this pipeline. // These labels appear in the `openshift.labels` map in the log record. // // +optional Labels map[string]string `json:"labels,omitempty"` // Name is optional, but must be unique in the `pipelines` list if provided. // // +optional Name string `json:"name,omitempty"` // Parse enables parsing of log entries into structured logs // // Logs are parsed according to parse value, only `json` is supported as of now. // // +kubebuilder:validation:Enum:=json // +optional Parse string `json:"parse,omitempty"` // DetectMultilineErrors enables multiline error detection of container logs // // +optional DetectMultilineErrors bool `json:"detectMultilineErrors,omitempty"` }
PipelineSpec links a set of inputs to a set of outputs.
func (*PipelineSpec) DeepCopy ¶
func (in *PipelineSpec) DeepCopy() *PipelineSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineSpec.
func (*PipelineSpec) DeepCopyInto ¶
func (in *PipelineSpec) DeepCopyInto(out *PipelineSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type PodStateMap ¶
type PodStateMap map[PodStateType][]string
func (PodStateMap) DeepCopy ¶
func (in PodStateMap) DeepCopy() PodStateMap
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodStateMap.
func (PodStateMap) DeepCopyInto ¶
func (in PodStateMap) DeepCopyInto(out *PodStateMap)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type PodStateType ¶
type PodStateType string
const ( PodStateTypeReady PodStateType = "ready" PodStateTypeNotReady PodStateType = "notReady" PodStateTypeFailed PodStateType = "failed" )
type ProxySpec ¶
type ProxySpec struct { // +nullable Resources *v1.ResourceRequirements `json:"resources,omitempty"` }
func (*ProxySpec) DeepCopy ¶
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxySpec.
func (*ProxySpec) DeepCopyInto ¶
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type PruneFilterSpec ¶
// PruneFilterSpec configures a prune filter: fields listed in `In` are
// removed from log records, while `NotIn` keeps only the listed fields.
type PruneFilterSpec struct {
	// `In` is an array of dot-delimited field paths. Fields included here are removed from the log record.
	// Each field path expression must start with a `.`.
	// The path can contain alpha-numeric characters and underscores (a-zA-Z0-9_).
	// If segments contain characters outside of this range, the segment must be quoted otherwise paths do NOT need to be quoted.
	// Examples: `.kubernetes.namespace_name`, `.log_type`, `.kubernetes.labels.foobar`, `.kubernetes.labels."foo-bar/baz"`
	// NOTE1: `In` CANNOT contain `.log_type` or `.message` as those fields are required and cannot be pruned.
	// NOTE2: If this filter is used in a pipeline with GoogleCloudLogging, `.hostname` CANNOT be added to this list as it is a required field.
	// +optional
	In []string `json:"in,omitempty"`

	// `NotIn` is an array of dot-delimited field paths. All fields besides the ones listed here are removed from the log record
	// Each field path expression must start with a `.`.
	// The path can contain alpha-numeric characters and underscores (a-zA-Z0-9_).
	// If segments contain characters outside of this range, the segment must be quoted otherwise paths do NOT need to be quoted.
	// Examples: `.kubernetes.namespace_name`, `.log_type`, `.kubernetes.labels.foobar`, `.kubernetes.labels."foo-bar/baz"`
	// NOTE1: `NotIn` MUST contain `.log_type` and `.message` as those fields are required and cannot be pruned.
	// NOTE2: If this filter is used in a pipeline with GoogleCloudLogging, `.hostname` MUST be added to this list as it is a required field.
	// +optional
	NotIn []string `json:"notIn,omitempty"`
}
func (*PruneFilterSpec) DeepCopy ¶
func (in *PruneFilterSpec) DeepCopy() *PruneFilterSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PruneFilterSpec.
func (*PruneFilterSpec) DeepCopyInto ¶
func (in *PruneFilterSpec) DeepCopyInto(out *PruneFilterSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ReceiverSpec ¶
// ReceiverSpec is a union of input Receiver types.
//
// The fields of the embedded ReceiverTypeSpec define the set of known
// Receiver types; Type names which one is in use.
type ReceiverSpec struct {
	// Type of Receiver plugin.
	// +optional
	Type string `json:"type"`

	// The ReceiverTypeSpec that handles particular parameters
	// +optional
	*ReceiverTypeSpec `json:",inline,omitempty"`
}
ReceiverSpec is a union of input Receiver types.
The fields of this struct define the set of known Receiver types.
func (*ReceiverSpec) DeepCopy ¶
func (in *ReceiverSpec) DeepCopy() *ReceiverSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReceiverSpec.
func (*ReceiverSpec) DeepCopyInto ¶
func (in *ReceiverSpec) DeepCopyInto(out *ReceiverSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*ReceiverSpec) GetHTTPFormat ¶
func (receiver *ReceiverSpec) GetHTTPFormat() (ret string)
func (*ReceiverSpec) GetHTTPPort ¶
func (receiver *ReceiverSpec) GetHTTPPort() (ret int32)
func (*ReceiverSpec) GetSyslogPort ¶
func (receiver *ReceiverSpec) GetSyslogPort() (ret int32)
func (*ReceiverSpec) IsAuditHttpReceiver ¶
func (receiver *ReceiverSpec) IsAuditHttpReceiver() bool
func (*ReceiverSpec) IsHttpReceiver ¶
func (receiver *ReceiverSpec) IsHttpReceiver() bool
func (*ReceiverSpec) IsSyslogReceiver ¶
func (receiver *ReceiverSpec) IsSyslogReceiver() bool
type ReceiverTypeSpec ¶
// ReceiverTypeSpec carries the per-type configuration of a ReceiverSpec;
// one field is set for the receiver type in use.
type ReceiverTypeSpec struct {
	// HTTP holds configuration for an HTTP receiver.
	HTTP *HTTPReceiver `json:"http,omitempty"`
	// Syslog holds configuration for a syslog receiver.
	Syslog *SyslogReceiver `json:"syslog,omitempty"`
}
func (*ReceiverTypeSpec) DeepCopy ¶
func (in *ReceiverTypeSpec) DeepCopy() *ReceiverTypeSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReceiverTypeSpec.
func (*ReceiverTypeSpec) DeepCopyInto ¶
func (in *ReceiverTypeSpec) DeepCopyInto(out *ReceiverTypeSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type RetentionPoliciesSpec ¶
// RetentionPoliciesSpec groups retention policies per log category
// (application, infrastructure, audit).
type RetentionPoliciesSpec struct {
	// App is the retention policy for application logs.
	//
	// +nullable
	App *RetentionPolicySpec `json:"application,omitempty"`

	// Infra is the retention policy for infrastructure logs.
	//
	// +nullable
	Infra *RetentionPolicySpec `json:"infra,omitempty"`

	// Audit is the retention policy for audit logs.
	//
	// +nullable
	Audit *RetentionPolicySpec `json:"audit,omitempty"`
}
func (*RetentionPoliciesSpec) DeepCopy ¶
func (in *RetentionPoliciesSpec) DeepCopy() *RetentionPoliciesSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionPoliciesSpec.
func (*RetentionPoliciesSpec) DeepCopyInto ¶
func (in *RetentionPoliciesSpec) DeepCopyInto(out *RetentionPoliciesSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type RetentionPolicySpec ¶
// RetentionPolicySpec configures how long, and under what conditions,
// log indices are retained in the Elasticsearch log store.
type RetentionPolicySpec struct {
	// MaxAge is the maximum age of documents before deletion.
	//
	// +optional
	MaxAge elasticsearch.TimeUnit `json:"maxAge"`

	// How often to run a new prune-namespaces job
	// +optional
	PruneNamespacesInterval elasticsearch.TimeUnit `json:"pruneNamespacesInterval"`

	// The per namespace specification to delete documents older than a given minimum age
	// +optional
	Namespaces []elasticsearch.IndexManagementDeleteNamespaceSpec `json:"namespaceSpec,omitempty"`

	// The threshold percentage of ES disk usage that when reached, old indices should be deleted (e.g. 75)
	// +optional
	DiskThresholdPercent int64 `json:"diskThresholdPercent,omitempty"`
}
func (*RetentionPolicySpec) DeepCopy ¶
func (in *RetentionPolicySpec) DeepCopy() *RetentionPolicySpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionPolicySpec.
func (*RetentionPolicySpec) DeepCopyInto ¶
func (in *RetentionPolicySpec) DeepCopyInto(out *RetentionPolicySpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type RetryTypeType ¶
// RetryTypeType selects the strategy used when retrying delivery to an output.
type RetryTypeType string

const (
	// RetryExponentialBackoff increases wait time exponentially between failures
	RetryExponentialBackoff RetryTypeType = "exponential_backoff"
	// RetryPeriodic to retry sending to output periodically on fixed intervals
	RetryPeriodic RetryTypeType = "periodic"
)
type RouteMap ¶
RouteMap maps input names to connected outputs or vice-versa.
func (RouteMap) DeepCopy ¶
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteMap.
func (RouteMap) DeepCopyInto ¶
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type Routes ¶
// Routes maps connected input and output names.
type Routes struct {
	// ByInput maps input names to connected outputs;
	// ByOutput is the reverse mapping, from output names to inputs.
	ByInput, ByOutput RouteMap
}
Routes maps connected input and output names.
func NewRoutes ¶
func NewRoutes(pipelines []PipelineSpec) Routes
func (*Routes) DeepCopy ¶
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Routes.
func (*Routes) DeepCopyInto ¶
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type Splunk ¶
// Splunk delivers log data to Splunk's HTTP Event Collector.
// Provides optional extra properties for `type: splunk_hec`
// (`splunk_hec_logs` after Vector 0.23).
type Splunk struct {
	// IndexKey is a meta-data key field to use to send events to.
	// For example: `IndexKey: kubernetes.namespace_name` will use the kubernetes
	// namespace as the index.
	// If the IndexKey is not found, the default index defined within Splunk is used.
	// Only one of IndexKey or IndexName can be defined.
	// If IndexKey && IndexName are not specified, the default index defined within Splunk is used.
	// +optional
	IndexKey string `json:"indexKey,omitempty"`

	// IndexName is the name of the index to send events to.
	// Only one of IndexKey or IndexName can be defined.
	// If IndexKey && IndexName are not specified, the default index defined within Splunk is used.
	// +optional
	IndexName string `json:"indexName,omitempty"`

	// Deprecated. Fields to be added to Splunk index.
	// +optional
	// +deprecated
	Fields []string `json:"fields,omitempty"`
}
Splunk delivers log data to Splunk’s HTTP Event Collector. Provides optional extra properties for `type: splunk_hec` (`splunk_hec_logs` after Vector 0.23).
func (*Splunk) DeepCopy ¶
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Splunk.
func (*Splunk) DeepCopyInto ¶
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type Syslog ¶
// Syslog provides optional extra properties for output type `syslog`.
type Syslog struct {
	// Severity to set on outgoing syslog records.
	//
	// Severity values are defined in https://tools.ietf.org/html/rfc5424#section-6.2.1
	// The value can be a decimal integer or one of these case-insensitive keywords:
	//
	// Emergency Alert Critical Error Warning Notice Informational Debug
	//
	// +optional
	Severity string `json:"severity,omitempty"`

	// Facility to set on outgoing syslog records.
	//
	// Facility values are defined in https://tools.ietf.org/html/rfc5424#section-6.2.1.
	// The value can be a decimal integer. Facility keywords are not standardized,
	// this API recognizes at least the following case-insensitive keywords
	// (defined by https://en.wikipedia.org/wiki/Syslog#Facility_Levels):
	//
	// kernel user mail daemon auth syslog lpr news
	// uucp cron authpriv ftp ntp security console solaris-cron
	// local0 local1 local2 local3 local4 local5 local6 local7
	//
	// +optional
	Facility string `json:"facility,omitempty"`

	// TrimPrefix is a prefix to trim from the tag.
	//
	// +optional
	TrimPrefix string `json:"trimPrefix,omitempty"`

	// Tag specifies a record field to use as tag.
	//
	// +optional
	Tag string `json:"tag,omitempty"`

	// PayloadKey specifies record field to use as payload.
	//
	// +optional
	PayloadKey string `json:"payloadKey,omitempty"`

	// AddLogSource adds log's source information to the log message
	// If the logs are collected from a process; namespace_name, pod_name, container_name is added to the log
	// In addition, it picks the originating process name and id (known as the `pid`) from the record
	// and injects them into the header field.
	//
	// +optional
	AddLogSource bool `json:"addLogSource,omitempty"`

	// Rfc specifies the rfc to be used for sending syslog
	//
	// Rfc values can be one of:
	//  - RFC3164 (https://tools.ietf.org/html/rfc3164)
	//  - RFC5424 (https://tools.ietf.org/html/rfc5424)
	//
	// If unspecified, RFC5424 will be assumed.
	//
	// +kubebuilder:validation:Enum:=RFC3164;RFC5424
	// +kubebuilder:default:=RFC5424
	// +optional
	RFC string `json:"rfc,omitempty"`

	// AppName is APP-NAME part of the syslog-msg header
	//
	// AppName needs to be specified if using rfc5424
	//
	// +optional
	AppName string `json:"appName,omitempty"`

	// ProcID is PROCID part of the syslog-msg header
	//
	// ProcID needs to be specified if using rfc5424
	//
	// +optional
	ProcID string `json:"procID,omitempty"`

	// MsgID is MSGID part of the syslog-msg header
	//
	// MsgID needs to be specified if using rfc5424
	//
	// +optional
	MsgID string `json:"msgID,omitempty"`
}
Syslog provides optional extra properties for output type `syslog`
func (*Syslog) DeepCopy ¶
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Syslog.
func (*Syslog) DeepCopyInto ¶
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type SyslogReceiver ¶
// SyslogReceiver receives logs from rsyslog.
type SyslogReceiver struct {
	// Port the Receiver listens on. It must be a value between 1024 and 65535
	// +kubebuilder:default:=10514
	// +kubebuilder:validation:Minimum:=1024
	// +kubebuilder:validation:Maximum:=65535
	// +optional
	Port int32 `json:"port"`
}
SyslogReceiver receives logs from rsyslog
func (*SyslogReceiver) DeepCopy ¶
func (in *SyslogReceiver) DeepCopy() *SyslogReceiver
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyslogReceiver.
func (*SyslogReceiver) DeepCopyInto ¶
func (in *SyslogReceiver) DeepCopyInto(out *SyslogReceiver)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type VisualizationSpec ¶
// VisualizationSpec contains information pertinent to log visualization:
// the visualization type (Kibana or the OCP console) and its scheduling
// and per-type configuration.
type VisualizationSpec struct {
	// The type of Visualization to configure
	//
	// +kubebuilder:validation:Enum=ocp-console;kibana
	Type VisualizationType `json:"type"`

	// Define which Nodes the Pods are scheduled on.
	//
	// +nullable
	//+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Visualization Node Selector",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:nodeSelector"}
	NodeSelector map[string]string `json:"nodeSelector,omitempty"`

	// Define the tolerations the Pods will accept
	// +nullable
	// +optional
	//+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Visualization Pod Tolerations",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:selector:core:v1:Toleration"}
	Tolerations []v1.Toleration `json:"tolerations,omitempty"`

	// Specification of the Kibana Visualization component
	//
	// +deprecated
	// +nullable
	// +optional
	Kibana *KibanaSpec `json:"kibana,omitempty"`

	// OCPConsole is the specification for the OCP console plugin
	//
	// +nullable
	// +optional
	OCPConsole *OCPConsoleSpec `json:"ocpConsole,omitempty"`
}
This struct contains information pertinent to log visualization (Kibana or the OCP console).
func (*VisualizationSpec) DeepCopy ¶
func (in *VisualizationSpec) DeepCopy() *VisualizationSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VisualizationSpec.
func (*VisualizationSpec) DeepCopyInto ¶
func (in *VisualizationSpec) DeepCopyInto(out *VisualizationSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type VisualizationStatus ¶
// VisualizationStatus reports the observed status of the visualization
// component (currently the Kibana deployment).
type VisualizationStatus struct {
	// KibanaStatus lists the status entries for the Kibana deployment.
	//
	// +optional
	KibanaStatus []elasticsearch.KibanaStatus `json:"kibanaStatus,omitempty"`
}
func (*VisualizationStatus) DeepCopy ¶
func (in *VisualizationStatus) DeepCopy() *VisualizationStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VisualizationStatus.
func (*VisualizationStatus) DeepCopyInto ¶
func (in *VisualizationStatus) DeepCopyInto(out *VisualizationStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type VisualizationType ¶
// VisualizationType names a supported log visualization frontend.
type VisualizationType string

const (
	// VisualizationTypeKibana selects the Kibana visualization component.
	VisualizationTypeKibana VisualizationType = "kibana"
	// VisualizationTypeOCPConsole selects the OCP console plugin.
	VisualizationTypeOCPConsole VisualizationType = "ocp-console"
)