Documentation ¶
Overview ¶
Package defaults contains the default values for various configurable options of a StorageCluster.
Index ¶
Constants ¶
View Source
const (
	// NodeAffinityKey is the node label used to decide whether a node
	// belongs to a storage cluster.
	NodeAffinityKey = "cluster.ocs.openshift.io/openshift-storage"

	// NodeTolerationKey is the taint that every OCS Pod should tolerate.
	NodeTolerationKey = "node.ocs.openshift.io/storage"

	// RackTopologyKey is the node label used to spread storage nodes
	// across racks when not enough AZs are present across the nodes.
	RackTopologyKey = "topology.rook.io/rack"

	// KubeMajorTopologySpreadConstraints is the minimum major Kubernetes
	// version that supports TopologySpreadConstraints; compared together
	// with KubeMinorTopologySpreadConstraints.
	KubeMajorTopologySpreadConstraints = "1"

	// KubeMinorTopologySpreadConstraints is the minimum minor Kubernetes
	// version that supports TopologySpreadConstraints; compared together
	// with KubeMajorTopologySpreadConstraints.
	KubeMinorTopologySpreadConstraints = "19"
)
Variables ¶
View Source
var (
	// DefaultMonCount is the number of monitors configured for the CephCluster.
	DefaultMonCount = 3
	// ArbiterModeMonCount is the number of monitors configured for the
	// CephCluster when arbiter mode is enabled.
	ArbiterModeMonCount = 5

	// DeviceSetReplica is the default number of Rook-Ceph
	// StorageClassDeviceSets per StorageCluster StorageDeviceSet; it equals
	// the default number of failure domains for OSDs.
	DeviceSetReplica = 3
	// ArbiterModeDeviceSetReplica is the default number of Rook-Ceph
	// StorageClassDeviceSets per StorageCluster StorageDeviceSet when
	// arbiter is enabled; it equals the default number of failure domains
	// for OSDs in that mode.
	ArbiterModeDeviceSetReplica = 2

	// CephObjectStoreGatewayInstances is the default number of RGW instances to create.
	CephObjectStoreGatewayInstances = 1
	// ArbiterCephObjectStoreGatewayInstances is the default number of RGW
	// instances to create when arbiter is enabled.
	ArbiterCephObjectStoreGatewayInstances = 2

	// IsUnsupportedCephVersionAllowed determines whether the CephCluster
	// may run an unsupported Ceph version image.
	IsUnsupportedCephVersionAllowed = ""

	// ReplicasPerFailureDomain is the default replica count per failure
	// domain; it maps to ReplicasPerFailureDomain in the CephReplicatedSpec
	// when creating CephBlockPools.
	ReplicasPerFailureDomain = 1
	// ArbiterReplicasPerFailureDomain is the default replica count per
	// failure domain when arbiter is enabled; it maps to
	// ReplicasPerFailureDomain in the CephReplicatedSpec when creating
	// CephBlockPools.
	ArbiterReplicasPerFailureDomain = 2
)
View Source
var ( // DefaultNodeAffinity is the NodeAffinity to be used when labelSelector is nil DefaultNodeAffinity = &corev1.NodeAffinity{ RequiredDuringSchedulingIgnoredDuringExecution: getOcsNodeSelector(), } // DaemonPlacements map contains the default placement configs for the // various OCS daemons DaemonPlacements = map[string]rookCephv1.Placement{ "all": { Tolerations: []corev1.Toleration{ getOcsToleration(), }, }, "mon": { PodAntiAffinity: &corev1.PodAntiAffinity{ RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{ getPodAffinityTerm("rook-ceph-mon"), }, }, }, "osd": { Tolerations: []corev1.Toleration{ getOcsToleration(), }, PodAntiAffinity: &corev1.PodAntiAffinity{ PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{ getWeightedPodAffinityTerm(100, "rook-ceph-osd"), }, }, }, "osd-prepare": { Tolerations: []corev1.Toleration{ getOcsToleration(), }, PodAntiAffinity: &corev1.PodAntiAffinity{ PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{ getWeightedPodAffinityTerm(100, "rook-ceph-osd-prepare"), }, }, }, "osd-tsc": { Tolerations: []corev1.Toleration{ getOcsToleration(), }, TopologySpreadConstraints: []corev1.TopologySpreadConstraint{ getTopologySpreadConstraintsSpec(1), }, }, "osd-prepare-tsc": { Tolerations: []corev1.Toleration{ getOcsToleration(), }, TopologySpreadConstraints: []corev1.TopologySpreadConstraint{ getTopologySpreadConstraintsSpec(1), }, }, "rgw": { Tolerations: []corev1.Toleration{ getOcsToleration(), }, PodAntiAffinity: &corev1.PodAntiAffinity{ RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{ getPodAffinityTerm("rook-ceph-rgw"), }, PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{ getWeightedPodAffinityTerm(100, "rook-ceph-rgw"), }, }, }, "mds": { Tolerations: []corev1.Toleration{ getOcsToleration(), }, PodAntiAffinity: &corev1.PodAntiAffinity{ RequiredDuringSchedulingIgnoredDuringExecution: 
[]corev1.PodAffinityTerm{ getPodAffinityTerm("rook-ceph-mds"), }, }, }, "noobaa-core": { Tolerations: []corev1.Toleration{ getOcsToleration(), }, }, } )
View Source
var ( // DaemonResources map contains the default resource requirements for the // various OCS daemons DaemonResources = map[string]corev1.ResourceRequirements{ "osd": { Requests: corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("2"), corev1.ResourceMemory: resource.MustParse("5Gi"), }, Limits: corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("2"), corev1.ResourceMemory: resource.MustParse("5Gi"), }, }, "mon": { Requests: corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("1"), corev1.ResourceMemory: resource.MustParse("2Gi"), }, Limits: corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("1"), corev1.ResourceMemory: resource.MustParse("2Gi"), }, }, "mds": { Requests: corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("3"), corev1.ResourceMemory: resource.MustParse("8Gi"), }, Limits: corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("3"), corev1.ResourceMemory: resource.MustParse("8Gi"), }, }, "rgw": { Requests: corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("2"), corev1.ResourceMemory: resource.MustParse("4Gi"), }, Limits: corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("2"), corev1.ResourceMemory: resource.MustParse("4Gi"), }, }, "mgr": { Requests: corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("1"), corev1.ResourceMemory: resource.MustParse("3Gi"), }, Limits: corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("1"), corev1.ResourceMemory: resource.MustParse("3Gi"), }, }, "mgr-sidecar": { Requests: corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("1"), corev1.ResourceMemory: resource.MustParse("40Mi"), }, Limits: corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("1"), corev1.ResourceMemory: resource.MustParse("100Mi"), }, }, "noobaa-core": { Requests: corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("1"), corev1.ResourceMemory: resource.MustParse("4Gi"), }, Limits: corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("1"), 
corev1.ResourceMemory: resource.MustParse("4Gi"), }, }, "noobaa-db": { Requests: corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("500m"), corev1.ResourceMemory: resource.MustParse("4Gi"), }, Limits: corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("500m"), corev1.ResourceMemory: resource.MustParse("4Gi"), }, }, "noobaa-db-vol": { Requests: corev1.ResourceList{ corev1.ResourceStorage: resource.MustParse("50Gi"), }, }, "noobaa-endpoint": { Requests: corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("1"), corev1.ResourceMemory: resource.MustParse("2Gi"), }, Limits: corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("1"), corev1.ResourceMemory: resource.MustParse("2Gi"), }, }, "rbd-mirror": { Limits: corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("1"), corev1.ResourceMemory: resource.MustParse("2Gi"), }, Requests: corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("1"), corev1.ResourceMemory: resource.MustParse("2Gi"), }, }, } )
Functions ¶
func GetDaemonResources ¶
func GetDaemonResources(name string, custom map[string]corev1.ResourceRequirements) corev1.ResourceRequirements
GetDaemonResources returns a custom ResourceRequirements for the passed name, if found in the passed resource map. If not, it returns the default value for the given name.
Types ¶
This section is empty.
Click to show internal directories.
Click to hide internal directories.