volsync

package v0.0.0-...-44c146a
Published: Apr 26, 2024 License: Apache-2.0 Imports: 28 Imported by: 0

Documentation


Constants

const (
	ManagedClusterAddOnKind    string = "ManagedClusterAddOn"
	ManagedClusterAddOnGroup   string = "addon.open-cluster-management.io"
	ManagedClusterAddOnVersion string = "v1alpha1"

	VolsyncManagedClusterAddOnName string = "volsync" // Needs to have this name
)
const (
	ServiceExportKind    string = "ServiceExport"
	ServiceExportGroup   string = "multicluster.x-k8s.io"
	ServiceExportVersion string = "v1alpha1"

	VolumeSnapshotKind                     string = "VolumeSnapshot"
	VolumeSnapshotIsDefaultAnnotation      string = "snapshot.storage.kubernetes.io/is-default-class"
	VolumeSnapshotIsDefaultAnnotationValue string = "true"

	PodVolumePVCClaimIndexName    string = "spec.volumes.persistentVolumeClaim.claimName"
	VolumeAttachmentToPVIndexName string = "spec.source.persistentVolumeName"

	VRGOwnerLabel          string = "volumereplicationgroups-owner"
	FinalSyncTriggerString string = "vrg-final-sync"

	SchedulingIntervalMinLength int = 2
	CronSpecMaxDayOfMonth       int = 28

	VolSyncDoNotDeleteLabel    = "volsync.backube/do-not-delete" // TODO: point to volsync constant once it is available
	VolSyncDoNotDeleteLabelVal = "true"

	// See: https://issues.redhat.com/browse/ACM-1256
	// https://github.com/stolostron/backlog/issues/21824
	ACMAppSubDoNotDeleteAnnotation    = "apps.open-cluster-management.io/do-not-delete"
	ACMAppSubDoNotDeleteAnnotationVal = "true"

	OwnerNameAnnotation      = "ramendr.openshift.io/owner-name"
	OwnerNamespaceAnnotation = "ramendr.openshift.io/owner-namespace"
)

Variables

var DefaultRsyncServiceType corev1.ServiceType = corev1.ServiceTypeClusterIP
var DefaultScheduleCronSpec = "*/10 * * * *" // Every 10 mins

Functions

func CleanupSecretPropagation

func CleanupSecretPropagation(ctx context.Context, k8sClient client.Client,
	ownerObject metav1.Object, log logr.Logger,
) error

Cleans up the Policy, PlacementRule and PlacementBinding used to replicate the volsync secret (if they exist); does not return an error if they do not exist.

func ConvertSchedulingIntervalToCronSpec

func ConvertSchedulingIntervalToCronSpec(schedulingInterval string) (*string, error)

Converts from schedulingInterval, which is in the format <num><m|h|d>, to the cronspec format that VolSync expects: https://en.wikipedia.org/wiki/Cron#Overview
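
For illustration, a minimal sketch of such a conversion, assuming only the <num><m|h|d> format described above; this is not the package's actual implementation. The day-of-month case is capped at 28 (compare CronSpecMaxDayOfMonth) so the schedule fires in every month, including February:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// toCronSpec converts "<num><m|h|d>" (e.g. "5m", "3h", "2d") to a cronspec.
func toCronSpec(interval string) (string, error) {
	if len(interval) < 2 { // compare SchedulingIntervalMinLength
		return "", fmt.Errorf("invalid interval %q", interval)
	}
	num, err := strconv.Atoi(strings.TrimRight(interval, "mhd"))
	if err != nil {
		return "", fmt.Errorf("invalid interval %q: %w", interval, err)
	}
	switch interval[len(interval)-1] {
	case 'm':
		return fmt.Sprintf("*/%d * * * *", num), nil
	case 'h':
		return fmt.Sprintf("0 */%d * * *", num), nil
	case 'd':
		if num > 28 { // compare CronSpecMaxDayOfMonth
			num = 28
		}
		return fmt.Sprintf("0 0 */%d * *", num), nil
	}
	return "", fmt.Errorf("unknown unit in %q", interval)
}

func main() {
	spec, _ := toCronSpec("10m")
	fmt.Println(spec) // */10 * * * * (compare DefaultScheduleCronSpec)
}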

func DeployVolSyncToCluster

func DeployVolSyncToCluster(ctx context.Context, k8sClient client.Client,
	managedClusterName string, log logr.Logger,
) error

Deploys VolSync from ACM to a managed cluster via a ManagedClusterAddOn.

Calling this function requires a ClusterRole that can create/update ManagedClusterAddOns.

Should be called from the hub.
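
A hedged usage sketch from hub-side code; the import path, cluster name, and surrounding setup are assumptions:

import (
	"context"

	"github.com/go-logr/logr"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/ramendr/ramen/controllers/volsync" // import path is an assumption
)

// ensureVolSyncAddon is a hypothetical hub-side helper.
func ensureVolSyncAddon(ctx context.Context, hubClient client.Client, log logr.Logger) error {
	// Requires a ClusterRole that can create/update ManagedClusterAddOns.
	return volsync.DeployVolSyncToCluster(ctx, hubClient, "managed-cluster-1", log)
}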

func GetVolSyncPSKSecretNameFromVRGName

func GetVolSyncPSKSecretNameFromVRGName(vrgName string) string

func PropagateSecretToClusters

func PropagateSecretToClusters(ctx context.Context, k8sClient client.Client, sourceSecret *corev1.Secret,
	ownerObject metav1.Object, destClusters []string, destSecretName, destSecretNamespace string,
	log logr.Logger,
) error

Should be run from a hub; assumes the source secret exists on the hub cluster and should be propagated to destClusters. Creates a Policy, PlacementRule and PlacementBinding on the hub in the same namespace as the source secret.
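
For example (a sketch; the secret, owner, and cluster names are hypothetical, and imports are as in the previous sketch):

// Hypothetical hub-side call; sourceSecret must already exist on the hub.
err := volsync.PropagateSecretToClusters(ctx, hubClient, sourceSecret,
	ownerVRG, // owner of the generated Policy/PlacementRule/PlacementBinding
	[]string{"cluster-east", "cluster-west"}, // destClusters
	"volsync-rsync-psk",                      // destSecretName (hypothetical)
	"app-namespace",                          // destSecretNamespace (hypothetical)
	log)
if err != nil {
	return err
}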

func ReconcileVolSyncReplicationSecret

func ReconcileVolSyncReplicationSecret(ctx context.Context, k8sClient client.Client, ownerObject metav1.Object,
	secretName, secretNamespace string, log logr.Logger) (*corev1.Secret, error,
)

Creates a new volsync replication secret on the cluster (should be called on the hub cluster). If the secret already exists, this is a no-op.
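
A usage sketch on the hub, pairing this with GetVolSyncPSKSecretNameFromVRGName (variable names are hypothetical):

// Idempotent: returns the existing secret if it is already present.
pskSecretName := volsync.GetVolSyncPSKSecretNameFromVRGName(vrgName)

secret, err := volsync.ReconcileVolSyncReplicationSecret(ctx, hubClient,
	ownerVRG, pskSecretName, vrgNamespace, log)
if err != nil {
	return err
}
log.Info("PSK secret ready", "name", secret.GetName())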

func ValidateObjectExists

func ValidateObjectExists(ctx context.Context, c client.Client, obj client.Object) error

ValidateObjectExists indicates whether a Kubernetes resource exists in the API server.
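
A sketch of an existence check; the PVC name and namespace are hypothetical, and it is assumed the lookup key is taken from the object's metadata (imports corev1 and metav1 as elsewhere in this package):

pvc := &corev1.PersistentVolumeClaim{
	ObjectMeta: metav1.ObjectMeta{Name: "my-app-pvc", Namespace: "app-namespace"},
}
if err := volsync.ValidateObjectExists(ctx, k8sClient, pvc); err != nil {
	return err // not found in the API server, or the lookup failed
}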

Types

type VSHandler

type VSHandler struct {
	// contains filtered or unexported fields
}

func NewVSHandler

func NewVSHandler(ctx context.Context, client client.Client, log logr.Logger, owner metav1.Object,
	asyncSpec *ramendrv1alpha1.VRGAsyncSpec, defaultCephFSCSIDriverName string, copyMethod string,
) *VSHandler
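
A construction sketch; the VRGAsyncSpec field, CSI driver name, and copy method shown are example values, not prescriptions:

asyncSpec := &ramendrv1alpha1.VRGAsyncSpec{
	SchedulingInterval: "5m", // assumed to be converted to a cronspec internally
}

vsHandler := volsync.NewVSHandler(ctx, k8sClient, log, ownerVRG,
	asyncSpec,
	"openshift-storage.cephfs.csi.ceph.com", // defaultCephFSCSIDriverName (example)
	"Snapshot",                              // copyMethod (example)
)
// vsHandler is then used for the ReconcileRS/ReconcileRD calls below.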

func (*VSHandler) CleanupRDNotInSpecList

func (v *VSHandler) CleanupRDNotInSpecList(rdSpecList []ramendrv1alpha1.VolSyncReplicationDestinationSpec) error

func (*VSHandler) DeleteRD

func (v *VSHandler) DeleteRD(pvcName string) error

func (*VSHandler) DeleteRS

func (v *VSHandler) DeleteRS(pvcName string) error

func (*VSHandler) DisownVolSyncManagedPVC

func (v *VSHandler) DisownVolSyncManagedPVC(pvc *corev1.PersistentVolumeClaim) error

func (*VSHandler) EnsurePVCforDirectCopy

func (v *VSHandler) EnsurePVCforDirectCopy(ctx context.Context,
	rdSpec ramendrv1alpha1.VolSyncReplicationDestinationSpec,
) error

func (*VSHandler) EnsurePVCfromRD

func (v *VSHandler) EnsurePVCfromRD(rdSpec ramendrv1alpha1.VolSyncReplicationDestinationSpec, failoverAction bool,
) error

func (*VSHandler) GetVolumeSnapshotClassFromPVCStorageClass

func (v *VSHandler) GetVolumeSnapshotClassFromPVCStorageClass(storageClassName *string) (string, error)

func (*VSHandler) GetVolumeSnapshotClasses

func (v *VSHandler) GetVolumeSnapshotClasses() ([]snapv1.VolumeSnapshotClass, error)

func (*VSHandler) IsCopyMethodDirect

func (v *VSHandler) IsCopyMethodDirect() bool

func (*VSHandler) IsRDDataProtected

func (v *VSHandler) IsRDDataProtected(pvcName string) (bool, error)

Returns true if at least one sync has completed (we'll consider this "data protected")

func (*VSHandler) IsRSDataProtected

func (v *VSHandler) IsRSDataProtected(pvcName string) (bool, error)

func (*VSHandler) ModifyRSSpecForCephFS

func (v *VSHandler) ModifyRSSpecForCephFS(rsSpec *ramendrv1alpha1.VolSyncReplicationSourceSpec,
	storageClass *storagev1.StorageClass,
) error

Workaround for cephfs issue: FIXME: For CephFS only, there is a problem where restoring a PVC from a snapshot can be very slow when there are a lot of files - on every replication cycle we need to create a PVC from snapshot in order to get a point-in-time copy of the source PVC to sync with the ReplicationDestination. This workaround follows the instructions here: https://github.com/ceph/ceph-csi/blob/devel/docs/cephfs-snapshot-backed-volumes.md

Steps:

 1. If the storageclass detected is cephfs, create a new storageclass with the backingSnapshot: "true" parameter (or reuse it if it already exists; see the sketch after this list). If not cephfs, return and do not modify rsSpec.
 2. Modify rsSpec to use the new storageclass and also update AccessModes to ReadOnlyMany as per the instructions above.
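
A minimal sketch of step 1, assuming the detected CephFS StorageClass is in baseSC (a *storagev1.StorageClass); the derived class name is hypothetical:

// Copy the detected cephfs storageclass and enable snapshot-backed volumes,
// per the ceph-csi instructions linked above.
newSC := baseSC.DeepCopy()
newSC.ObjectMeta = metav1.ObjectMeta{Name: baseSC.Name + "-backingsnapshot"} // hypothetical name
if newSC.Parameters == nil {
	newSC.Parameters = map[string]string{}
}
newSC.Parameters["backingSnapshot"] = "true"
// Step 2 then points rsSpec at newSC.Name and sets AccessModes to ReadOnlyMany.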

func (*VSHandler) PrecreateDestPVCIfEnabled

func (v *VSHandler) PrecreateDestPVCIfEnabled(rdSpec ramendrv1alpha1.VolSyncReplicationDestinationSpec,
) (*string, error)

func (*VSHandler) PreparePVC

func (v *VSHandler) PreparePVC(pvcName string, prepFinalSync, copyMethodDirect bool) error

func (*VSHandler) ReconcileRD

Returns a replication destination only if create/update is successful and the RD is considered available. Callers should assume that getting a nil replication destination back means they should retry/requeue.

func (*VSHandler) ReconcileRS

Returns true only if runFinalSync is true and the final sync is done. Returns a replication source only if create/update is successful. Callers should assume that getting a nil replication source back means they should retry/requeue. In short: returns whether the final sync is complete, and also returns an RS if one was reconciled.
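
A caller sketch; the exact signature is an assumption inferred from the description above, and the requeue helper is hypothetical:

// Assumed shape: ReconcileRS(rsSpec, runFinalSync) (finalSyncDone bool, rs *ReplicationSource, err error)
finalSyncDone, rs, err := vsHandler.ReconcileRS(rsSpec, runFinalSync)
if err != nil {
	return err
}
if rs == nil {
	return requeue() // create/update not yet successful; retry
}
if runFinalSync && !finalSyncDone {
	return requeue() // final sync still in progress
}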

func (*VSHandler) TakePVCOwnership

func (v *VSHandler) TakePVCOwnership(pvcName string) (bool, error)

TakePVCOwnership adds the do-not-delete annotation to indicate that ACM should not delete/clean up this PVC when the AppSub is removed, and adds the VRG as owner so the PVC is garbage collected when the VRG is deleted.
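
A usage sketch (the PVC name is hypothetical):

owned, err := vsHandler.TakePVCOwnership("my-app-pvc")
if err != nil || !owned {
	return err // ownership not taken yet; retry/requeue
}
// The PVC now carries apps.open-cluster-management.io/do-not-delete: "true"
// and has the VRG as an owner, so it is garbage collected with the VRG.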
