core

package
v0.0.0-...-73e42e7 Latest Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Mar 4, 2020 License: Apache-2.0 Imports: 20 Imported by: 0

Documentation

Index

Constants

View Source
const (
	// MaxKubernetesEmptyNodeDeletionTime is the maximum time needed by Kubernetes to delete an empty node.
	MaxKubernetesEmptyNodeDeletionTime = 3 * time.Minute
	// MaxCloudProviderNodeDeletionTime is the maximum time needed by cloud provider to delete a node.
	MaxCloudProviderNodeDeletionTime = 5 * time.Minute
	// MaxPodEvictionTime is the maximum time CA tries to evict a pod before giving up.
	MaxPodEvictionTime = 2 * time.Minute
	// EvictionRetryTime is the time after CA retries failed pod eviction.
	EvictionRetryTime = 10 * time.Second
	// PodEvictionHeadroom is the extra time we wait to catch situations when the pod is ignoring SIGTERM and
	// is killed with SIGKILL after MaxGracefulTerminationTime
	PodEvictionHeadroom = 30 * time.Second
	// UnremovableNodeRecheckTimeout is the timeout before we check again a node that couldn't be removed before
	UnremovableNodeRecheckTimeout = 5 * time.Minute
)

Variables

This section is empty.

Functions

func CommonNodes

func CommonNodes(nodelist1 []*apiv1.Node, nodelist2 []*apiv1.Node) []*apiv1.Node

CommonNodes returns the nodes common to both node lists

func ContainsNode

func ContainsNode(node *apiv1.Node, nodelist []*apiv1.Node) bool

ContainsNode reports whether the given node list contains the passed node

Types

type Autorepair

type Autorepair interface {
	// RunOnce represents an iteration in the control-loop of CA
	RunOnce(time.Duration) errors.AutorepairError
}

Autorepair is the main component of CA which scales up/down node groups according to its configuration. The configuration can be injected at the creation of an autorepair.

func NewAutorepair

func NewAutorepair(opts AutorepairOptions, kubeClient kube_client.Interface,
	kubeEventRecorder kube_record.EventRecorder, listerRegistry kube_util.ListerRegistry) (Autorepair, errors.AutorepairError)

NewAutorepair creates an autorepair of an appropriate type according to the parameters

type AutorepairBuilder

type AutorepairBuilder interface {
	//SetDynamicConfig(config dynamic.Config) AutorepairBuilder
	Build() (Autorepair, errors.AutorepairError)
}

AutorepairBuilder builds an instance of Autorepair which is the core of CA

type AutorepairBuilderImpl

type AutorepairBuilderImpl struct {
	// contains filtered or unexported fields
}

AutorepairBuilderImpl builds new autorepairs from its state including initial `AutorepairingOptions` given at startup and `dynamic.Config` read on demand from the configmap

func NewAutorepairBuilder

func NewAutorepairBuilder(autorepairingOptions AutorepairingOptions,
	kubeClient kube_client.Interface, kubeEventRecorder kube_record.EventRecorder, listerRegistry kube_util.ListerRegistry) *AutorepairBuilderImpl

NewAutorepairBuilder builds an AutorepairBuilder from required parameters

func (*AutorepairBuilderImpl) Build

Build an autorepair according to the builder's state

type AutorepairOptions

type AutorepairOptions struct {
	AutorepairingOptions
}

AutorepairOptions is the whole set of options for configuring an autorepair

type AutorepairingContext

type AutorepairingContext struct {
	// Options to customize how autoscaling works
	AutorepairingOptions
	// CloudProvider used in CA.
	CloudProvider cloudprovider.CloudProvider
	// ClientSet interface.
	ClientSet kube_client.Interface
	// ClusterState for maintaining the state of cluster nodes.
	ClusterStateRegistry *clusterstate.ClusterStateRegistry
	// Recorder for recording events.
	Recorder kube_record.EventRecorder
	// // PredicateChecker to check if a pod can fit into a node.
	// PredicateChecker *simulator.PredicateChecker
	// // ExpanderStrategy is the strategy used to choose which node group to expand when scaling up
	// ExpanderStrategy expander.Strategy
	// LogRecorder can be used to collect log messages to expose via Events on some central object.
	LogRecorder *utils.LogEventRecorder
}

AutorepairingContext contains user-configurable constants and configuration-related objects passed to scale up/scale down functions.

func NewAutorepairContext

func NewAutorepairContext(options AutorepairingOptions,
	kubeClient kube_client.Interface, kubeEventRecorder kube_record.EventRecorder,
	logEventRecorder *utils.LogEventRecorder, listerRegistry kube_util.ListerRegistry) (*AutorepairingContext, errors.AutorepairError)

NewAutorepairContext returns an autorepairing context from all the necessary parameters passed via arguments

type AutorepairingOptions

type AutorepairingOptions struct {
	// MaxEmptyBulkDelete is a number of empty nodes that can be removed at the same time.
	MaxEmptyBulkDelete int
	// ScaleDownUtilizationThreshold sets threshold for nodes to be considered for scale down.
	// Well-utilized nodes are not touched.
	ScaleDownUtilizationThreshold float64
	// ScaleDownUnneededTime sets the duration CA expects a node to be unneeded/eligible for removal
	// before scaling down the node.
	ScaleDownUnneededTime time.Duration
	// ScaleDownUnreadyTime represents how long an unready node should be unneeded before it is eligible for scale down
	ScaleDownUnreadyTime time.Duration
	// MaxNodesTotal sets the maximum number of nodes in the whole cluster
	MaxNodesTotal int
	// MaxCoresTotal sets the maximum number of cores in the whole cluster
	MaxCoresTotal int64
	// MinCoresTotal sets the minimum number of cores in the whole cluster
	MinCoresTotal int64
	// MaxMemoryTotal sets the maximum memory (in megabytes) in the whole cluster
	MaxMemoryTotal int64
	// MinMemoryTotal sets the minimum memory (in megabytes) in the whole cluster
	MinMemoryTotal int64
	// NodeGroupAutoDiscovery represents one or more definition(s) of node group auto-discovery
	NodeGroupAutoDiscovery string
	// UnregisteredNodeRemovalTime represents how long CA waits before removing nodes that are not registered in Kubernetes
	UnregisteredNodeRemovalTime time.Duration
	// EstimatorName is the estimator used to estimate the number of needed nodes in scale up.
	EstimatorName string
	// ExpanderName sets the type of node group expander to be used in scale up
	ExpanderName string
	// MaxGracefulTerminationSec is maximum number of seconds scale down waits for pods to terminate before
	// removing the node from cloud provider.
	MaxGracefulTerminationSec int
	//  Maximum time CA waits for node to be provisioned
	MaxNodeProvisionTime time.Duration
	// MaxTotalUnreadyPercentage is the maximum percentage of unready nodes after which CA halts operations
	MaxTotalUnreadyPercentage float64
	// OkTotalUnreadyCount is the number of allowed unready nodes, irrespective of max-total-unready-percentage
	OkTotalUnreadyCount int
	// CloudConfig is the path to the cloud provider configuration file. Empty string for no configuration file.
	CloudConfig string
	// CloudProviderName sets the type of the cloud provider CA is about to run in. Allowed values: gce, aws
	CloudProviderName string
	// NodeGroups is the list of node groups a.k.a autoscaling targets
	NodeGroups []string
	// ScaleDownEnabled is used to allow CA to scale down the cluster
	ScaleDownEnabled bool
	// ScaleDownDelayAfterAdd sets the duration from the last scale up to the time when CA starts to check scale down options
	ScaleDownDelayAfterAdd time.Duration
	// ScaleDownDelayAfterDelete sets the duration between scale down attempts if scale down removes one or more nodes
	ScaleDownDelayAfterDelete time.Duration
	// ScaleDownDelayAfterFailure sets the duration before the next scale down attempt if scale down results in an error
	ScaleDownDelayAfterFailure time.Duration
	// ScaleDownNonEmptyCandidatesCount is the maximum number of non empty nodes
	// considered at once as candidates for scale down.
	ScaleDownNonEmptyCandidatesCount int
	// ScaleDownCandidatesPoolRatio is a ratio of nodes that are considered
	// as additional non empty candidates for scale down when some candidates from
	// previous iteration are no longer valid.
	ScaleDownCandidatesPoolRatio float64
	// ScaleDownCandidatesPoolMinCount is the minimum number of nodes that are
	// considered as additional non empty candidates for scale down when some
	// candidates from previous iteration are no longer valid.
	// The formula to calculate additional candidates number is following:
	// max(#nodes * ScaleDownCandidatesPoolRatio, ScaleDownCandidatesPoolMinCount)
	ScaleDownCandidatesPoolMinCount int
	// WriteStatusConfigMap tells if the status information should be written to a ConfigMap
	WriteStatusConfigMap bool
	// BalanceSimilarNodeGroups enables logic that identifies node groups with similar machines and tries to balance node count between them.
	BalanceSimilarNodeGroups bool
	// ConfigNamespace is the namespace cluster-autorepair is running in and all related configmaps live in
	ConfigNamespace string
	// ClusterName if available
	ClusterName string
	// NodeAutoprovisioningEnabled tells whether the node auto-provisioning is enabled for this cluster.
	NodeAutoprovisioningEnabled bool
	// MaxAutoprovisionedNodeGroupCount is the maximum number of autoprovisioned groups in the cluster.
	MaxAutoprovisionedNodeGroupCount int
}

AutorepairingOptions contains various options to customize how autorepairing works

type DynamicAutorepair

type DynamicAutorepair struct {
	// contains filtered or unexported fields
}

DynamicAutorepair is a variant of autorepair which supports dynamic reconfiguration at runtime

func NewDynamicAutorepair

func NewDynamicAutorepair(autorepairBuilder AutorepairBuilder) (*DynamicAutorepair, errors.AutorepairError)

NewDynamicAutorepair builds a DynamicAutorepair from required parameters

func (*DynamicAutorepair) RunOnce

func (a *DynamicAutorepair) RunOnce(repairtime time.Duration) errors.AutorepairError

RunOnce invokes the underlying static autorepair's RunOnce method to repair nodes

type StaticAutorepair

type StaticAutorepair struct {
	*AutorepairingContext
	kube_util.ListerRegistry
	// contains filtered or unexported fields
}

StaticAutorepair is an autorepair which has all the core functionality of a CA but without the reconfiguration feature

func NewStaticAutorepair

func NewStaticAutorepair(opts AutorepairingOptions,
	kubeClient kube_client.Interface, kubeEventRecorder kube_record.EventRecorder, listerRegistry kube_util.ListerRegistry) (*StaticAutorepair, errors.AutorepairError)

NewStaticAutorepair creates an instance of Autorepair filled with provided parameters

func (*StaticAutorepair) AsgNotReadyNodes

func (a *StaticAutorepair) AsgNotReadyNodes(desiredAsg cloudprovider.NodeGroup, notReadyNodes []*apiv1.Node) ([]*apiv1.Node, error)

AsgNotReadyNodes extracts the not-ready nodes for a particular ASG from the passed notReadyNodes list (which may contain stale node status)

func (*StaticAutorepair) AsgReadyNodes

func (a *StaticAutorepair) AsgReadyNodes(desiredAsg cloudprovider.NodeGroup) ([]*apiv1.Node, error)

AsgReadyNodes returns the ready nodes of the ASG based on real-time status; used to poll on newly created nodes (latest node status)

func (*StaticAutorepair) AsgRepairNodes

func (a *StaticAutorepair) AsgRepairNodes(asg cloudprovider.NodeGroup, toBeRepairedNodes []*apiv1.Node)

Repair nodes in a particular ASG

func (*StaticAutorepair) CalcExpandSize

func (a *StaticAutorepair) CalcExpandSize(asg cloudprovider.NodeGroup, toBeRepairedNodes []*apiv1.Node) int

CalcExpandSize calculates the possible expansion size for an ASG based on its allowable maximum size

func (*StaticAutorepair) DeleteNodes

func (a *StaticAutorepair) DeleteNodes(asg cloudprovider.NodeGroup, nodeList []*apiv1.Node)

DeleteNodes deletes nodes from the ASG. This is a blocking call.

func (*StaticAutorepair) IncreaseSize

func (a *StaticAutorepair) IncreaseSize(asg cloudprovider.NodeGroup, toBeRepairedNodes []*apiv1.Node, incSize int)

IncreaseSize increases the size of the ASG. This is a blocking call until all nodes are ready.

func (*StaticAutorepair) RepairNodes

func (a *StaticAutorepair) RepairNodes(toBeRepairedNodes []*apiv1.Node)

Repair nodes for a particular kubernetes cluster

func (*StaticAutorepair) RunOnce

func (a *StaticAutorepair) RunOnce(repairtime time.Duration) errors.AutorepairError

RunOnce iterates over node groups and repairs them if necessary

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL