Documentation ¶
Overview ¶
Package config provides conditions for CephCluster
Package controller provides Kubernetes controller/pod/container spec items used for many Ceph daemons
Index ¶
- Constants
- Variables
- func AddCephVersionLabelToDaemonSet(cephVersion version.CephVersion, d *apps.DaemonSet)
- func AddCephVersionLabelToDeployment(cephVersion version.CephVersion, d *apps.Deployment)
- func AddCephVersionLabelToJob(cephVersion version.CephVersion, j *batch.Job)
- func AddCephVersionLabelToObjectMeta(cephVersion version.CephVersion, meta *metav1.ObjectMeta)
- func AddFinalizerIfNotPresent(ctx context.Context, client client.Client, obj client.Object) error
- func AddVolumeMountSubPath(podSpec *v1.PodSpec, volumeMountName string)
- func AdminFlags(cluster *client.ClusterInfo) []string
- func AppLabels(appName, namespace string) map[string]string
- func CephDaemonAppLabels(appName, namespace, daemonType, daemonID, parentName, resourceKind string, ...) map[string]string
- func CephSecurityContext() *v1.SecurityContext
- func CephVolumeMounts(dataPaths *config.DataPathMap, confGeneratedInPod bool) []v1.VolumeMount
- func CheckPodMemory(name string, resources v1.ResourceRequirements, cephPodMinimumMemory uint64) error
- func ChownCephDataDirsInitContainer(dpm config.DataPathMap, containerImage string, ...) v1.Container
- func ClusterOwnerRef(clusterName, clusterID string) metav1.OwnerReference
- func ConfGeneratedInPodVolumeAndMount() (v1.Volume, v1.VolumeMount)
- func ConfigureExternalMetricsEndpoint(ctx *clusterd.Context, monitoringSpec cephv1.MonitoringSpec, ...) error
- func ContainerEnvVarReference(envVarName string) string
- func CreateBootstrapPeerSecret(ctx *clusterd.Context, clusterInfo *cephclient.ClusterInfo, ...) (reconcile.Result, error)
- func CreateOrUpdateObject(ctx context.Context, client client.Client, obj client.Object) error
- func CurrentAndDesiredCephVersion(ctx context.Context, rookImage, namespace, jobName string, ...) (*cephver.CephVersion, *cephver.CephVersion, error)
- func DaemonEnvVars(image string) []v1.EnvVar
- func DaemonFlags(cluster *client.ClusterInfo, spec *cephv1.ClusterSpec, daemonID string) []string
- func DaemonVolumeMounts(dataPaths *config.DataPathMap, keyringResourceName string) []v1.VolumeMount
- func DaemonVolumes(dataPaths *config.DataPathMap, keyringResourceName string) []v1.Volume
- func DaemonVolumesBase(dataPaths *config.DataPathMap, keyringResourceName string) []v1.Volume
- func DaemonVolumesContainsPVC(volumes []v1.Volume) bool
- func DaemonVolumesDataHostPath(dataPaths *config.DataPathMap) []v1.Volume
- func DaemonVolumesDataPVC(pvcName string) v1.Volume
- func DetectCephVersion(ctx context.Context, rookImage, namespace, jobName string, ...) (*cephver.CephVersion, error)
- func DiscoveryDaemonEnabled(data map[string]string) bool
- func DuplicateCephClusters(ctx context.Context, c client.Client, object client.Object, log bool) bool
- func ErrorCephUpgradingRequeue(runningCephVersion, desiredCephVersion *cephver.CephVersion) error
- func ExtractCephVersionFromLabel(labelVersion string) (*version.CephVersion, error)
- func ExtractKey(contents string) (string, error)
- func GenerateBootstrapPeerSecret(object client.Object, token []byte) *v1.Secret
- func GenerateLivenessProbeExecDaemon(daemonType, daemonID string) *v1.Probe
- func GenerateMinimalCephConfInitContainer(username, keyringPath string, containerImage string, ...) v1.Container
- func GenerateStartupProbeExecDaemon(daemonType, daemonID string) *v1.Probe
- func GenerateStatusInfo(object client.Object) map[string]string
- func GetCephVersionLabel(cephVersion version.CephVersion) string
- func GetContainerImagePullPolicy(containerImagePullPolicy v1.PullPolicy) v1.PullPolicy
- func GetImageVersion(cephCluster cephv1.CephCluster) (*cephver.CephVersion, error)
- func HostPathRequiresPrivileged() bool
- func IsDoNotReconcile(labels map[string]string) bool
- func IsReadyToReconcile(ctx context.Context, c client.Client, namespacedName types.NamespacedName, ...) (cephv1.CephCluster, bool, bool, reconcile.Result)
- func LogCollectorContainer(daemonID, ns string, c cephv1.ClusterSpec) *v1.Container
- func LoopDevicesAllowed() bool
- func NetworkBindingFlags(cluster *client.ClusterInfo, spec *cephv1.ClusterSpec) []string
- func ObjectToCRMapper(ctx context.Context, c client.Client, ro runtime.Object, ...) (handler.MapFunc, error)
- func ParseMonEndpoints(input string) map[string]*cephclient.MonInfo
- func PodSecurityContext() *v1.SecurityContext
- func PodVolumes(dataPaths *config.DataPathMap, dataDirHostPath string, confGeneratedInPod bool) []v1.Volume
- func PopulateExternalClusterInfo(cephClusterSpec *cephv1.ClusterSpec, context *clusterd.Context, ...) (*cephclient.ClusterInfo, error)
- func PrivilegedContext(runAsRoot bool) *v1.SecurityContext
- func ReloadManager()
- func RemoveFinalizer(ctx context.Context, client client.Client, obj client.Object) error
- func RemoveFinalizerWithName(ctx context.Context, client client.Client, obj client.Object, ...) error
- func RookVolumeMounts(dataPaths *config.DataPathMap, confGeneratedInPod bool) []v1.VolumeMount
- func SetAllowLoopDevices(data map[string]string)
- func SetCephCommandsTimeout(data map[string]string)
- func StoredLogAndCrashVolume(hostLogDir, hostCrashDir string) []v1.Volume
- func StoredLogAndCrashVolumeMount(varLogCephDir, varLibCephCrashDir string) []v1.VolumeMount
- func UpdateClusterCondition(c *clusterd.Context, cluster *cephv1.CephCluster, ...)
- func UpdateCondition(ctx context.Context, c *clusterd.Context, namespaceName types.NamespacedName, ...)
- func UpdateMonsOutOfQuorum(clientset kubernetes.Interface, namespace string, monsOutOfQuorum []string) error
- func ValidateCephVersionsBetweenLocalAndExternalClusters(context *clusterd.Context, clusterInfo *cephclient.ClusterInfo) (cephver.CephVersion, error)
- func ValidatePeerToken(object client.Object, data map[string][]byte) error
- func WatchControllerPredicate() predicate.Funcs
- func WatchPredicateForNonCRDObject(owner runtime.Object, scheme *runtime.Scheme) predicate.Funcs
- type ClusterHealth
- type Mapping
- type MonScheduleInfo
- type OperatorConfig
- type OwnerMatcher
Constants ¶
const ( // OperatorCreds is the name of the secret //nolint:gosec // since this is not leaking any hardcoded credentials, it's just the secret name OperatorCreds = "rook-ceph-operator-creds" MonSecretNameKey = "mon-secret" CephUsernameKey = "ceph-username" CephUserSecretKey = "ceph-secret" // EndpointConfigMapName is the name of the configmap with mon endpoints EndpointConfigMapName = "rook-ceph-mon-endpoints" // EndpointDataKey is the name of the key inside the mon configmap to get the endpoints EndpointDataKey = "data" // OutOfQuorumKey is the name of the key for tracking mons detected out of quorum OutOfQuorumKey = "outOfQuorum" // MaxMonIDKey is the name of the max mon id used MaxMonIDKey = "maxMonId" // MappingKey is the name of the mapping for the mon->node and node->port MappingKey = "mapping" // AppName is the name of the secret storing cluster mon.admin key, fsid and name AppName = "rook-ceph-mon" DisasterProtectionFinalizerName = cephv1.CustomResourceGroup + "/disaster-protection" )
const ( // OperatorSettingConfigMapName refers to ConfigMap that configures rook ceph operator OperatorSettingConfigMapName string = "rook-ceph-operator-config" // UninitializedCephConfigError refers to the error message printed by the Ceph CLI when there is no ceph configuration file // This typically is raised when the operator has not finished initializing UninitializedCephConfigError = "error calling conf_read_file" // OperatorNotInitializedMessage is the message we print when the Operator is not ready to reconcile, typically the ceph.conf has not been generated yet OperatorNotInitializedMessage = "skipping reconcile since operator is still initializing" )
const ( //nolint:gosec // since this is not leaking any hardcoded credentials, it's just the prefix of the secret name RBDMirrorBootstrapPeerSecretName = "rbdMirrorBootstrapPeerSecretName" //nolint:gosec // since this is not leaking any hardcoded credentials, it's just the prefix of the secret name FSMirrorBootstrapPeerSecretName = "fsMirrorBootstrapPeerSecretName" )
const ( // ConfigInitContainerName is the name which is given to the config initialization container // in all Ceph pods. ConfigInitContainerName = "config-init" DaemonIDLabel = "ceph_daemon_id" ExternalMgrAppName = "rook-ceph-mgr-external" ServiceExternalMetricName = "http-external-metrics" CephUserID = 167 )
const ( // CephVersionLabelKey is the key used for reporting the Ceph version which Rook has detected is // configured for the labeled resource. CephVersionLabelKey = "ceph-version" )
const (
DoNotReconcileLabelName = "do_not_reconcile"
)
Variables ¶
var ( // ImmediateRetryResult Return this for an immediate retry of the reconciliation loop with the same request object. ImmediateRetryResult = reconcile.Result{Requeue: true} // ImmediateRetryResultNoBackoff Return this for an immediate retry of the reconciliation loop with the same request object. // Override the exponential backoff behavior by setting the RequeueAfter time explicitly. ImmediateRetryResultNoBackoff = reconcile.Result{Requeue: true, RequeueAfter: time.Second} // WaitForRequeueIfCephClusterNotReady waits for the CephCluster to be ready WaitForRequeueIfCephClusterNotReady = reconcile.Result{Requeue: true, RequeueAfter: 10 * time.Second} // WaitForRequeueIfCephClusterIsUpgrading waits until the upgrade is complete WaitForRequeueIfCephClusterIsUpgrading = reconcile.Result{Requeue: true, RequeueAfter: time.Minute} // WaitForRequeueIfFinalizerBlocked waits for resources to be cleaned up before the finalizer can be removed WaitForRequeueIfFinalizerBlocked = reconcile.Result{Requeue: true, RequeueAfter: 10 * time.Second} // WaitForRequeueIfOperatorNotInitialized waits for the operator to finish initializing before the reconcile can proceed WaitForRequeueIfOperatorNotInitialized = reconcile.Result{Requeue: true, RequeueAfter: 10 * time.Second} // OperatorCephBaseImageVersion is the ceph version in the operator image OperatorCephBaseImageVersion string )
var (
ClusterInfoNoClusterNoSecret = errors.New("not expected to create new cluster info and did not find existing secret")
)
var ClusterResource = k8sutil.CustomResource{ Name: "cephcluster", Plural: "cephclusters", Group: cephv1.CustomResourceGroup, Version: cephv1.Version, Kind: reflect.TypeOf(cephv1.CephCluster{}).Name(), APIVersion: fmt.Sprintf("%s/%s", cephv1.CustomResourceGroup, cephv1.Version), }
ClusterResource operator-kit Custom Resource Definition
Functions ¶
func AddCephVersionLabelToDaemonSet ¶
func AddCephVersionLabelToDaemonSet(cephVersion version.CephVersion, d *apps.DaemonSet)
AddCephVersionLabelToDaemonSet adds a label reporting the Ceph version which Rook has detected is running in the DaemonSet's pods.
func AddCephVersionLabelToDeployment ¶
func AddCephVersionLabelToDeployment(cephVersion version.CephVersion, d *apps.Deployment)
AddCephVersionLabelToDeployment adds a label reporting the Ceph version which Rook has detected is running in the Deployment's pods.
func AddCephVersionLabelToJob ¶
func AddCephVersionLabelToJob(cephVersion version.CephVersion, j *batch.Job)
AddCephVersionLabelToJob adds a label reporting the Ceph version which Rook has detected is running in the Job's pods.
func AddCephVersionLabelToObjectMeta ¶
func AddCephVersionLabelToObjectMeta(cephVersion version.CephVersion, meta *metav1.ObjectMeta)
func AddFinalizerIfNotPresent ¶
AddFinalizerIfNotPresent adds a finalizer to an object to avoid instant deletion of the object without finalizing it.
func AddVolumeMountSubPath ¶
AddVolumeMountSubPath updates each init and regular container of the podspec such that each volume mount attached to a container is mounted under a subpath in the source volume. This is important because some daemons may not start if the volume mount directory is non-empty. When the volume is the root of an ext4 file system, one may find a "lost+found" directory.
func AdminFlags ¶
func AdminFlags(cluster *client.ClusterInfo) []string
AdminFlags returns the command line flags used for Ceph commands requiring admin authentication.
func AppLabels ¶
AppLabels returns labels common for all Rook-Ceph applications which may be useful for admins. App name is the name of the application: e.g., 'rook-ceph-mon', 'rook-ceph-mgr', etc.
func CephDaemonAppLabels ¶ added in v1.4.0
func CephDaemonAppLabels(appName, namespace, daemonType, daemonID, parentName, resourceKind string, includeNewLabels bool) map[string]string
CephDaemonAppLabels returns pod labels common to all Rook-Ceph pods which may be useful for admins. App name is the name of the application: e.g., 'rook-ceph-mon', 'rook-ceph-mgr', etc Daemon type is the Ceph daemon type: "mon", "mgr", "osd", "mds", "rgw" Daemon ID is the ID portion of the Ceph daemon name: "a" for "mon.a"; "c" for "mds.c" ParentName is the resource metadata.name: "rook-ceph", "my-cluster", etc ResourceKind is the CR type: "CephCluster", "CephFilesystem", etc
func CephSecurityContext ¶ added in v1.9.13
func CephSecurityContext() *v1.SecurityContext
CephSecurityContext detects if the pod needs privileges to run
func CephVolumeMounts ¶
func CephVolumeMounts(dataPaths *config.DataPathMap, confGeneratedInPod bool) []v1.VolumeMount
CephVolumeMounts returns the common list of Kubernetes volume mounts for Ceph containers. This function is only used for OSDs.
func CheckPodMemory ¶
func CheckPodMemory(name string, resources v1.ResourceRequirements, cephPodMinimumMemory uint64) error
CheckPodMemory verifies that the pod's memory limit is valid
func ChownCephDataDirsInitContainer ¶
func ChownCephDataDirsInitContainer( dpm config.DataPathMap, containerImage string, containerImagePullPolicy v1.PullPolicy, volumeMounts []v1.VolumeMount, resources v1.ResourceRequirements, securityContext *v1.SecurityContext, ) v1.Container
ChownCephDataDirsInitContainer returns an init container which `chown`s the given data directories as the `ceph:ceph` user in the container. It also `chown`s the Ceph log dir in the container automatically. Doing a chown in a post start lifecycle hook does not reliably complete before the OSD process starts, which can cause the pod to fail without the lifecycle hook's chown command completing. It can take an arbitrarily long time for a pod restart to successfully chown the directory. This is a race condition for all daemons; therefore, do this in an init container. See more discussion here: https://github.com/rook/rook/pull/3594#discussion_r312279176
func ClusterOwnerRef ¶ added in v1.4.0
func ClusterOwnerRef(clusterName, clusterID string) metav1.OwnerReference
ClusterOwnerRef represents the owner reference of the CephCluster CR
func ConfGeneratedInPodVolumeAndMount ¶ added in v1.5.0
func ConfGeneratedInPodVolumeAndMount() (v1.Volume, v1.VolumeMount)
ConfGeneratedInPodVolumeAndMount generates an emptyDir volume and mount for /etc/ceph
func ConfigureExternalMetricsEndpoint ¶ added in v1.6.3
func ConfigureExternalMetricsEndpoint(ctx *clusterd.Context, monitoringSpec cephv1.MonitoringSpec, clusterInfo *client.ClusterInfo, ownerInfo *k8sutil.OwnerInfo) error
func ContainerEnvVarReference ¶
ContainerEnvVarReference returns a reference to a Kubernetes container env var of the given name which can be used in command or argument fields.
func CreateBootstrapPeerSecret ¶ added in v1.7.1
func CreateOrUpdateObject ¶
CreateOrUpdateObject creates or updates an object with a given status
func CurrentAndDesiredCephVersion ¶ added in v1.8.0
func CurrentAndDesiredCephVersion(ctx context.Context, rookImage, namespace, jobName string, ownerInfo *k8sutil.OwnerInfo, context *clusterd.Context, cephClusterSpec *cephv1.ClusterSpec, clusterInfo *cephclient.ClusterInfo) (*cephver.CephVersion, *cephver.CephVersion, error)
func DaemonEnvVars ¶
DaemonEnvVars returns the container environment variables used by all Ceph daemons.
func DaemonFlags ¶
func DaemonFlags(cluster *client.ClusterInfo, spec *cephv1.ClusterSpec, daemonID string) []string
DaemonFlags returns the command line flags used by all Ceph daemons.
func DaemonVolumeMounts ¶
func DaemonVolumeMounts(dataPaths *config.DataPathMap, keyringResourceName string) []v1.VolumeMount
DaemonVolumeMounts returns volume mounts which correspond to the DaemonVolumes. These volume mounts are shared by most all Ceph daemon containers, both init and standard. If keyring resource name is empty, there will be no keyring mounted in the container.
func DaemonVolumes ¶
func DaemonVolumes(dataPaths *config.DataPathMap, keyringResourceName string) []v1.Volume
DaemonVolumes returns the pod volumes used by all Ceph daemons. If keyring resource name is empty, there will be no keyring volume created from a secret.
func DaemonVolumesBase ¶
func DaemonVolumesBase(dataPaths *config.DataPathMap, keyringResourceName string) []v1.Volume
DaemonVolumesBase returns the common / static set of volumes.
func DaemonVolumesContainsPVC ¶
DaemonVolumesContainsPVC returns true if a volume exists with a volume source configured with a persistent volume claim.
func DaemonVolumesDataHostPath ¶
func DaemonVolumesDataHostPath(dataPaths *config.DataPathMap) []v1.Volume
DaemonVolumesDataHostPath returns HostPath volume source for daemon container data.
func DaemonVolumesDataPVC ¶
DaemonVolumesDataPVC returns a PVC volume source for daemon container data.
func DetectCephVersion ¶ added in v1.8.0
func DetectCephVersion(ctx context.Context, rookImage, namespace, jobName string, ownerInfo *k8sutil.OwnerInfo, clientset kubernetes.Interface, cephClusterSpec *cephv1.ClusterSpec) (*cephver.CephVersion, error)
DetectCephVersion loads the ceph version from the image and checks that it meets the version requirements to run in the cluster
func DiscoveryDaemonEnabled ¶ added in v1.6.0
func DuplicateCephClusters ¶ added in v1.8.2
func DuplicateCephClusters(ctx context.Context, c client.Client, object client.Object, log bool) bool
DuplicateCephClusters determines whether a similar object exists in the same namespace. It is mainly used for the CephCluster, for which we only support a single instance per namespace
func ErrorCephUpgradingRequeue ¶ added in v1.8.0
func ErrorCephUpgradingRequeue(runningCephVersion, desiredCephVersion *cephver.CephVersion) error
func ExtractCephVersionFromLabel ¶
func ExtractCephVersionFromLabel(labelVersion string) (*version.CephVersion, error)
ExtractCephVersionFromLabel returns a CephVersion struct deserialized from a provided version label.
func ExtractKey ¶ added in v1.9.2
ExtractKey retrieves mon secret key from the keyring file
func GenerateBootstrapPeerSecret ¶ added in v1.7.1
GenerateBootstrapPeerSecret generates a Kubernetes Secret for the mirror bootstrap peer token
func GenerateLivenessProbeExecDaemon ¶
GenerateLivenessProbeExecDaemon generates a liveness probe that makes sure a daemon has a socket, that it can be called, and that it returns 0
func GenerateMinimalCephConfInitContainer ¶
func GenerateMinimalCephConfInitContainer( username, keyringPath string, containerImage string, containerImagePullPolicy v1.PullPolicy, volumeMounts []v1.VolumeMount, resources v1.ResourceRequirements, securityContext *v1.SecurityContext, ) v1.Container
GenerateMinimalCephConfInitContainer returns an init container that will generate the most basic Ceph config for connecting non-Ceph daemons to a Ceph cluster (e.g., nfs-ganesha). Effectively what this means is that it generates '/etc/ceph/ceph.conf' with 'mon_host' populated and a keyring path associated with the user given. 'mon_host' is determined by the 'ROOK_CEPH_MON_HOST' env var present in other Ceph daemon pods, and the keyring is expected to be mounted into the container with a Kubernetes pod volume+mount.
func GenerateStartupProbeExecDaemon ¶ added in v1.8.2
GenerateStartupProbeExecDaemon generates a startup probe that makes sure a daemon has a socket, that it can be called, and that it returns 0
func GenerateStatusInfo ¶ added in v1.7.1
func GetCephVersionLabel ¶
func GetCephVersionLabel(cephVersion version.CephVersion) string
GetCephVersionLabel returns a formatted serialization of a provided CephVersion for use in resource labels.
func GetContainerImagePullPolicy ¶ added in v1.10.3
func GetContainerImagePullPolicy(containerImagePullPolicy v1.PullPolicy) v1.PullPolicy
func GetImageVersion ¶ added in v1.3.6
func GetImageVersion(cephCluster cephv1.CephCluster) (*cephver.CephVersion, error)
GetImageVersion returns the CephVersion registered for a specified image (if any), or an error if no version was found.
func HostPathRequiresPrivileged ¶ added in v1.7.8
func HostPathRequiresPrivileged() bool
func IsDoNotReconcile ¶ added in v1.4.8
func IsReadyToReconcile ¶
func IsReadyToReconcile(ctx context.Context, c client.Client, namespacedName types.NamespacedName, controllerName string) (cephv1.CephCluster, bool, bool, reconcile.Result)
IsReadyToReconcile determines if a controller is ready to reconcile or not
func LogCollectorContainer ¶ added in v1.5.2
func LogCollectorContainer(daemonID, ns string, c cephv1.ClusterSpec) *v1.Container
LogCollectorContainer returns a container that rotates the Ceph daemon logs
func LoopDevicesAllowed ¶ added in v1.10.6
func LoopDevicesAllowed() bool
func NetworkBindingFlags ¶ added in v1.6.0
func NetworkBindingFlags(cluster *client.ClusterInfo, spec *cephv1.ClusterSpec) []string
func ObjectToCRMapper ¶ added in v1.4.0
func ObjectToCRMapper(ctx context.Context, c client.Client, ro runtime.Object, scheme *runtime.Scheme) (handler.MapFunc, error)
ObjectToCRMapper returns the list of metadata for a given object type. It is used to trigger a reconcile of object Kind A when watching object Kind B, so we reconcile Kind A instead of Kind B. For instance, we watch for CephCluster CR changes but want to reconcile CephFilesystem based on a Spec change.
func ParseMonEndpoints ¶ added in v1.9.2
func ParseMonEndpoints(input string) map[string]*cephclient.MonInfo
ParseMonEndpoints parses a flattened representation of mons and endpoints in the form <mon-name>=<mon-endpoint> and returns a map of Ceph mon configs keyed by mon name.
func PodSecurityContext ¶ added in v1.5.2
func PodSecurityContext() *v1.SecurityContext
PodSecurityContext detects if the pod needs privileges to run
func PodVolumes ¶
func PodVolumes(dataPaths *config.DataPathMap, dataDirHostPath string, confGeneratedInPod bool) []v1.Volume
PodVolumes fills in the volumes parameter with the common list of Kubernetes volumes for use in Ceph pods. This function is only used for OSDs.
func PopulateExternalClusterInfo ¶ added in v1.9.2
func PopulateExternalClusterInfo(cephClusterSpec *cephv1.ClusterSpec, context *clusterd.Context, ctx context.Context, namespace string, ownerInfo *k8sutil.OwnerInfo) (*cephclient.ClusterInfo, error)
PopulateExternalClusterInfo retrieves the cluster info for an external cluster; it keeps waiting if the external cluster has no OSDs yet.
func PrivilegedContext ¶ added in v1.8.1
func PrivilegedContext(runAsRoot bool) *v1.SecurityContext
PrivilegedContext returns a privileged Pod security context
func ReloadManager ¶ added in v1.8.0
func ReloadManager()
func RemoveFinalizer ¶
RemoveFinalizer removes a finalizer from an object
func RemoveFinalizerWithName ¶ added in v1.8.0
func RemoveFinalizerWithName(ctx context.Context, client client.Client, obj client.Object, finalizerName string) error
RemoveFinalizerWithName removes finalizer passed as an argument from an object
func RookVolumeMounts ¶
func RookVolumeMounts(dataPaths *config.DataPathMap, confGeneratedInPod bool) []v1.VolumeMount
RookVolumeMounts returns the common list of Kubernetes volume mounts for Rook containers. This function is only used by OSDs.
func SetAllowLoopDevices ¶ added in v1.10.6
func SetCephCommandsTimeout ¶ added in v1.7.1
SetCephCommandsTimeout sets the timeout value of Ceph commands which are executed from Rook
func StoredLogAndCrashVolume ¶
StoredLogAndCrashVolume returns pod volumes sourced from the stored log and crash files.
func StoredLogAndCrashVolumeMount ¶
func StoredLogAndCrashVolumeMount(varLogCephDir, varLibCephCrashDir string) []v1.VolumeMount
StoredLogAndCrashVolumeMount returns pod volume mounts sourced from the stored log and crash files.
func UpdateClusterCondition ¶ added in v1.5.9
func UpdateClusterCondition(c *clusterd.Context, cluster *cephv1.CephCluster, namespaceName types.NamespacedName, observedGeneration int64, conditionType cephv1.ConditionType, status v1.ConditionStatus, reason cephv1.ConditionReason, message string, preserveAllConditions bool)
UpdateClusterCondition function will export each condition into the cluster custom resource
func UpdateCondition ¶ added in v1.5.9
func UpdateCondition(ctx context.Context, c *clusterd.Context, namespaceName types.NamespacedName, observedGeneration int64, conditionType cephv1.ConditionType, status v1.ConditionStatus, reason cephv1.ConditionReason, message string)
UpdateCondition function will export each condition into the cluster custom resource
func UpdateMonsOutOfQuorum ¶ added in v1.10.6
func UpdateMonsOutOfQuorum(clientset kubernetes.Interface, namespace string, monsOutOfQuorum []string) error
func ValidateCephVersionsBetweenLocalAndExternalClusters ¶
func ValidateCephVersionsBetweenLocalAndExternalClusters(context *clusterd.Context, clusterInfo *cephclient.ClusterInfo) (cephver.CephVersion, error)
ValidateCephVersionsBetweenLocalAndExternalClusters makes sure an external cluster can be connected by checking the external ceph versions available and comparing it with the local image provided
func ValidatePeerToken ¶ added in v1.7.1
func WatchControllerPredicate ¶
WatchControllerPredicate is a special update filter for update events. Do not reconcile if only the status changes; this avoids a reconcile storm loop.
Returning 'true' means triggering a reconciliation; returning 'false' means do NOT trigger a reconciliation.
func WatchPredicateForNonCRDObject ¶
WatchPredicateForNonCRDObject is a special filter for create events It only applies to non-CRD objects, meaning, for instance a cephv1.CephBlockPool{} object will not have this filter Only for objects like &v1.Secret{} etc...
We return 'false' on a create event so we don't overstep with the main watcher on cephv1.CephBlockPool{} This avoids a double reconcile when the secret gets deleted.
Types ¶
type ClusterHealth ¶ added in v1.8.8
type ClusterHealth struct { InternalCtx context.Context InternalCancel context.CancelFunc }
ClusterHealth is passed to the various monitoring go routines to stop them when the context is cancelled
type Mapping ¶ added in v1.9.2
type Mapping struct { // This isn't really node info since it could also be for zones, but we leave it as "node" for backward compatibility. Schedule map[string]*MonScheduleInfo `json:"node"` }
Mapping is mon node and port mapping
func CreateOrLoadClusterInfo ¶ added in v1.9.2
func CreateOrLoadClusterInfo(clusterdContext *clusterd.Context, context context.Context, namespace string, ownerInfo *k8sutil.OwnerInfo, cephClusterSpec *cephv1.ClusterSpec) (*cephclient.ClusterInfo, int, *Mapping, error)
CreateOrLoadClusterInfo constructs or loads a clusterinfo and returns it along with the maxMonID
func LoadClusterInfo ¶ added in v1.9.2
func LoadClusterInfo(ctx *clusterd.Context, context context.Context, namespace string, cephClusterSpec *cephv1.ClusterSpec) (*cephclient.ClusterInfo, int, *Mapping, error)
LoadClusterInfo constructs or loads a clusterinfo and returns it along with the maxMonID
type MonScheduleInfo ¶ added in v1.9.2
type MonScheduleInfo struct { // Name of the node. **json names are capitalized for backwards compat** Name string `json:"Name,omitempty"` Hostname string `json:"Hostname,omitempty"` Address string `json:"Address,omitempty"` Zone string `json:"zone,omitempty"` }
MonScheduleInfo contains name and address of a node.
type OperatorConfig ¶ added in v1.8.0
type OperatorConfig struct { OperatorNamespace string Image string ServiceAccount string NamespaceToWatch string Parameters map[string]string }
OperatorConfig represents the configuration of the operator
type OwnerMatcher ¶
type OwnerMatcher struct {
// contains filtered or unexported fields
}
OwnerMatcher is a struct representing the controller owner reference to use for comparison with child objects
func NewOwnerReferenceMatcher ¶
NewOwnerReferenceMatcher initializes a new owner reference matcher
func (*OwnerMatcher) Match ¶
Match checks whether a given object matches the parent controller owner reference It is used in the predicate functions for non-CRD objects to ensure we only watch resources that have the parent Kind in its owner reference AND the same UID
So we won't reconcile other objects if we have multiple CRs
For example, for CephObjectStore we will only watch "secrets" that have an owner reference referencing the 'CephObjectStore' Kind