osd

package
v1.7.8
Published: Nov 18, 2021 License: Apache-2.0 Imports: 39 Imported by: 68

Documentation

Overview

Package osd for the Ceph OSDs.

Package config provides methods for generating the Ceph config for a Ceph cluster and for producing a "ceph.conf" compatible file from the config as well as Ceph command line-compatible flags.

Index

Constants

const (

	// EncryptedDeviceEnvVarName is used in the pod spec to indicate whether the OSD is encrypted or not
	EncryptedDeviceEnvVarName = "ROOK_ENCRYPTED_DEVICE"
	PVCNameEnvVarName         = "ROOK_PVC_NAME"
	// CephVolumeEncryptedKeyEnvVarName is the env variable used by ceph-volume to encrypt the OSD (raw mode)
	// Hardcoded in ceph-volume do NOT touch
	CephVolumeEncryptedKeyEnvVarName = "CEPH_VOLUME_DMCRYPT_SECRET"

	// PVCBackedOSDVarName indicates whether the OSD is on PVC ("true") or not ("false")
	PVCBackedOSDVarName = "ROOK_PVC_BACKED_OSD"

	CrushDeviceClassVarName   = "ROOK_OSD_CRUSH_DEVICE_CLASS"
	CrushInitialWeightVarName = "ROOK_OSD_CRUSH_INITIAL_WEIGHT"
	CrushRootVarName          = "ROOK_CRUSHMAP_ROOT"
)
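
For illustration only, a minimal sketch of wiring these names into a container's environment using k8s.io/api/core/v1. The package import path and all values shown are assumptions, not taken from the documentation above.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"

	// Assumed import path for this package.
	osd "github.com/rook/rook/pkg/operator/ceph/cluster/osd"
)

func main() {
	// Hypothetical values; the operator normally fills these in per OSD pod.
	env := []corev1.EnvVar{
		{Name: osd.PVCNameEnvVarName, Value: "set1-data-0-abcde"},
		{Name: osd.PVCBackedOSDVarName, Value: "true"},
		{Name: osd.EncryptedDeviceEnvVarName, Value: "true"},
		{Name: osd.CrushDeviceClassVarName, Value: "ssd"},
	}
	for _, e := range env {
		fmt.Printf("%s=%s\n", e.Name, e.Value)
	}
}
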
const (
	// CephDeviceSetLabelKey is the Rook device set label key
	CephDeviceSetLabelKey = "ceph.rook.io/DeviceSet"
	// CephSetIndexLabelKey is the Rook label key index
	CephSetIndexLabelKey = "ceph.rook.io/setIndex"
	// CephDeviceSetPVCIDLabelKey is the Rook PVC ID label key
	CephDeviceSetPVCIDLabelKey = "ceph.rook.io/DeviceSetPVCId"
	// OSDOverPVCLabelKey is the Rook PVC label key
	OSDOverPVCLabelKey = "ceph.rook.io/pvc"
	// TopologyLocationLabel is the crush location label added to OSD deployments
	TopologyLocationLabel = "topology-location-%s"
)
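
For illustration, a sketch (with made-up names) of building the label set an OSD-over-PVC deployment might carry, including a topology-location label formatted from TopologyLocationLabel. The import path and all values are assumptions.

package main

import (
	"fmt"

	osd "github.com/rook/rook/pkg/operator/ceph/cluster/osd" // assumed import path
)

func main() {
	// Hypothetical device set, index, and PVC names.
	labels := map[string]string{
		osd.CephDeviceSetLabelKey:      "set1",
		osd.CephSetIndexLabelKey:       "0",
		osd.CephDeviceSetPVCIDLabelKey: "set1-data-0",
		osd.OSDOverPVCLabelKey:         "set1-data-0-abcde",
		// TopologyLocationLabel is a format string; fill in the failure domain type.
		fmt.Sprintf(osd.TopologyLocationLabel, "zone"): "zone-a",
	}
	fmt.Println(labels)
}
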
const (
	// AppName is the "app" label on osd pods
	AppName = "rook-ceph-osd"
	// FailureDomainKey is the label key whose value is the failure domain of the OSD
	FailureDomainKey = "failure-domain"

	// OsdIdLabelKey is the OSD label key
	OsdIdLabelKey = "ceph-osd-id"
)
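
As a sketch, these keys can be combined into a standard Kubernetes label selector to find the pod of a single OSD; the OSD id below is hypothetical.

package main

import (
	"fmt"

	osd "github.com/rook/rook/pkg/operator/ceph/cluster/osd" // assumed import path
)

func main() {
	const osdID = 3 // hypothetical OSD id
	selector := fmt.Sprintf("app=%s,%s=%d", osd.AppName, osd.OsdIdLabelKey, osdID)
	// e.g. pass as metav1.ListOptions{LabelSelector: selector} when listing pods
	fmt.Println(selector)
}
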
const (

	// DmcryptBlockType is a portion of the device mapper name for the encrypted OSD data block on PVC
	DmcryptBlockType = "block-dmcrypt"
	// DmcryptMetadataType is a portion of the device mapper name for the encrypted OSD metadata device (block.db, RocksDB) on PVC
	DmcryptMetadataType = "db-dmcrypt"
	// DmcryptWalType is a portion of the device mapper name for the encrypted OSD on PVC wal
	DmcryptWalType = "wal-dmcrypt"
)
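
These suffixes distinguish the encrypted device-mapper targets for the OSD's block, db, and wal devices. For illustration only, a sketch composing device-mapper style names from a hypothetical PVC name; the exact naming convention Rook uses is not specified here.

package main

import (
	"fmt"

	osd "github.com/rook/rook/pkg/operator/ceph/cluster/osd" // assumed import path
)

func main() {
	pvcName := "set1-data-0-abcde" // hypothetical PVC backing the OSD
	for _, suffix := range []string{osd.DmcryptBlockType, osd.DmcryptMetadataType, osd.DmcryptWalType} {
		// Illustrative only: e.g. "/dev/mapper/set1-data-0-abcde-block-dmcrypt"
		fmt.Printf("/dev/mapper/%s-%s\n", pvcName, suffix)
	}
}
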
const (
	// OrchestrationStatusStarting denotes the OSD provisioning is beginning.
	OrchestrationStatusStarting = "starting"
	// OrchestrationStatusOrchestrating denotes the OSD provisioning has begun and is running.
	OrchestrationStatusOrchestrating = "orchestrating"
	// OrchestrationStatusCompleted denotes the OSD provisioning has completed. This does not imply
	// the provisioning completed successfully in whole or in part.
	OrchestrationStatusCompleted = "completed"
	// OrchestrationStatusFailed denotes the OSD provisioning has failed.
	OrchestrationStatusFailed = "failed"
)
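
A sketch of how a caller might react to these status values; the handling shown is illustrative, not the operator's actual logic, and the import path is assumed.

package main

import (
	"fmt"

	osd "github.com/rook/rook/pkg/operator/ceph/cluster/osd" // assumed import path
)

func handle(status osd.OrchestrationStatus) {
	switch status.Status {
	case osd.OrchestrationStatusStarting, osd.OrchestrationStatusOrchestrating:
		fmt.Println("provisioning still in progress")
	case osd.OrchestrationStatusCompleted:
		// Completed does not imply success; inspect the reported OSDs and message.
		fmt.Printf("provisioning finished with %d OSDs: %s\n", len(status.OSDs), status.Message)
	case osd.OrchestrationStatusFailed:
		fmt.Println("provisioning failed:", status.Message)
	}
}

func main() {
	handle(osd.OrchestrationStatus{Status: osd.OrchestrationStatusCompleted, Message: "done"})
}
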

Variables

var (

	// KubernetesTopologyLabels are the topology labels that can be specified with the standard
	// Kubernetes node labels such as topology.kubernetes.io/zone.
	// These are all at the top layers of the CRUSH map.
	KubernetesTopologyLabels = []string{"zone", "region"}

	// The node labels that are supported with the topology.rook.io prefix such as topology.rook.io/rack
	// The labels are in order from lowest to highest in the CRUSH hierarchy
	CRUSHTopologyLabels = []string{"chassis", "rack", "row", "pdu", "pod", "room", "datacenter"}

	// The list of supported failure domains in the CRUSH map, ordered from lowest to highest
	CRUSHMapLevelsOrdered = append([]string{"host"}, append(CRUSHTopologyLabels, KubernetesTopologyLabels...)...)
)

Functions

func ExtractOSDTopologyFromLabels added in v1.2.6

func ExtractOSDTopologyFromLabels(labels map[string]string) (map[string]string, string)

ExtractOSDTopologyFromLabels extracts the Rook topology from node labels and returns a map from topology type to value
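
A minimal sketch calling the function with hypothetical node labels; the import path is assumed, and the printed output is only indicative.

package main

import (
	"fmt"

	osd "github.com/rook/rook/pkg/operator/ceph/cluster/osd" // assumed import path
)

func main() {
	// Hypothetical node labels combining standard Kubernetes and topology.rook.io keys.
	nodeLabels := map[string]string{
		"topology.kubernetes.io/region": "us-east-1",
		"topology.kubernetes.io/zone":   "us-east-1a",
		"topology.rook.io/rack":         "rack1",
	}
	topology, affinity := osd.ExtractOSDTopologyFromLabels(nodeLabels)
	fmt.Println("topology:", topology)
	fmt.Println("affinity:", affinity)
}
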

func GetExistingPVCs added in v1.5.6

func GetExistingPVCs(clusterdContext *clusterd.Context, namespace string) (map[string]*v1.PersistentVolumeClaim, map[string]sets.String, error)

GetExistingPVCs fetches the list of OSD PVCs

func GetLocationWithNode added in v1.2.2

func GetLocationWithNode(clientset kubernetes.Interface, nodeName string, crushRoot, crushHostname string) (string, string, error)

GetLocationWithNode gets the topology information about the node. The return values are:

 location: The CRUSH properties for the OSD to apply
 topologyAffinity: The label to be applied to the OSD daemon to guarantee it will start in the same
		topology as the OSD prepare job.
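
A sketch of a caller, assuming a kubernetes.Interface is already available; the CRUSH root "default" and the reuse of the node name as the CRUSH hostname are assumptions for illustration.

package osdutil // illustrative package name

import (
	"fmt"

	"k8s.io/client-go/kubernetes"

	osd "github.com/rook/rook/pkg/operator/ceph/cluster/osd" // assumed import path
)

// printNodeLocation is a sketch: look up the CRUSH location and topology affinity for a node.
func printNodeLocation(clientset kubernetes.Interface, nodeName string) error {
	location, topologyAffinity, err := osd.GetLocationWithNode(clientset, nodeName, "default", nodeName)
	if err != nil {
		return err
	}
	fmt.Println("location:", location)
	fmt.Println("topologyAffinity:", topologyAffinity)
	return nil
}
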

func PrivilegedContext added in v1.3.0

func PrivilegedContext() *v1.SecurityContext

PrivilegedContext returns a privileged Pod security context
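
A sketch of attaching the returned security context to a container spec; the container name and image are hypothetical.

package osdutil // illustrative package name

import (
	corev1 "k8s.io/api/core/v1"

	osd "github.com/rook/rook/pkg/operator/ceph/cluster/osd" // assumed import path
)

// exampleContainer is a sketch: build a container that runs with the privileged security context.
func exampleContainer() corev1.Container {
	return corev1.Container{
		Name:            "osd-example",           // hypothetical container name
		Image:           "quay.io/ceph/ceph:v16", // hypothetical image
		SecurityContext: osd.PrivilegedContext(),
	}
}
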

func UpdateNodeOrPVCStatus added in v1.7.3

func UpdateNodeOrPVCStatus(kv *k8sutil.ConfigMapKVStore, nodeOrPVC string, status OrchestrationStatus) string

UpdateNodeOrPVCStatus updates the status ConfigMap for the OSD on the given node or PVC. It returns the name of the ConfigMap used.
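
A sketch of recording a "starting" status for a node, assuming a k8sutil.ConfigMapKVStore has already been constructed elsewhere; import paths are assumed.

package osdutil // illustrative package name

import (
	"fmt"

	osd "github.com/rook/rook/pkg/operator/ceph/cluster/osd" // assumed import path
	"github.com/rook/rook/pkg/operator/k8sutil"              // assumed import path
)

// reportStarting is a sketch: record that OSD provisioning is starting on the given node.
func reportStarting(kv *k8sutil.ConfigMapKVStore, nodeName string) {
	status := osd.OrchestrationStatus{Status: osd.OrchestrationStatusStarting}
	cmName := osd.UpdateNodeOrPVCStatus(kv, nodeName, status)
	fmt.Println("status recorded in ConfigMap:", cmName)
}
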

Types

type Cluster

type Cluster struct {
	ValidStorage cephv1.StorageScopeSpec // valid subset of `Storage`, computed at runtime
	// contains filtered or unexported fields
}

Cluster keeps track of the OSDs

func New

func New(context *clusterd.Context, clusterInfo *cephclient.ClusterInfo, spec cephv1.ClusterSpec, rookVersion string) *Cluster

New creates an instance of the OSD manager

func (*Cluster) Start

func (c *Cluster) Start() error

Start the OSD management
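
A sketch of the typical call sequence, creating the OSD manager and starting it; the context objects are assumed to be provided by the operator and the Rook version string is only an example.

package osdutil // illustrative package name

import (
	cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"   // assumed import path
	"github.com/rook/rook/pkg/clusterd"                      // assumed import path
	cephclient "github.com/rook/rook/pkg/daemon/ceph/client" // assumed import path
	osd "github.com/rook/rook/pkg/operator/ceph/cluster/osd" // assumed import path
)

// startOSDs is a sketch: build the OSD manager for a cluster spec and start orchestration.
func startOSDs(ctx *clusterd.Context, clusterInfo *cephclient.ClusterInfo, spec cephv1.ClusterSpec) error {
	c := osd.New(ctx, clusterInfo, spec, "v1.7.8") // rookVersion shown is hypothetical
	return c.Start()
}
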

type OSDHealthMonitor added in v1.3.3

type OSDHealthMonitor struct {
	// contains filtered or unexported fields
}

OSDHealthMonitor defines OSD process monitoring

func NewOSDHealthMonitor added in v1.3.3

func NewOSDHealthMonitor(context *clusterd.Context, clusterInfo *client.ClusterInfo, removeOSDsIfOUTAndSafeToRemove bool, healthCheck cephv1.CephClusterHealthCheckSpec) *OSDHealthMonitor

NewOSDHealthMonitor instantiates OSD monitoring

func (*OSDHealthMonitor) Start added in v1.3.3

func (m *OSDHealthMonitor) Start(stopCh chan struct{})

Start runs the monitoring logic for OSD status at set intervals

func (*OSDHealthMonitor) Update added in v1.3.3

func (m *OSDHealthMonitor) Update(removeOSDsIfOUTAndSafeToRemove bool)

Update updates the removeOSDsIfOUTAndSafeToRemove setting
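
A sketch of creating the monitor and running it until a stop signal; the empty health check spec and the flag value are placeholder choices, and import paths are assumed.

package osdutil // illustrative package name

import (
	cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"   // assumed import path
	"github.com/rook/rook/pkg/clusterd"                      // assumed import path
	cephclient "github.com/rook/rook/pkg/daemon/ceph/client" // assumed import path
	osd "github.com/rook/rook/pkg/operator/ceph/cluster/osd" // assumed import path
)

// monitorOSDs is a sketch: start OSD health monitoring in the background and return
// the channel that stops it when closed.
func monitorOSDs(ctx *clusterd.Context, clusterInfo *cephclient.ClusterInfo) chan struct{} {
	m := osd.NewOSDHealthMonitor(ctx, clusterInfo, false /* removeOSDsIfOUTAndSafeToRemove */, cephv1.CephClusterHealthCheckSpec{})
	stopCh := make(chan struct{})
	go m.Start(stopCh)
	return stopCh
}
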

type OSDInfo

type OSDInfo struct {
	ID             int    `json:"id"`
	Cluster        string `json:"cluster"`
	UUID           string `json:"uuid"`
	DevicePartUUID string `json:"device-part-uuid"`
	DeviceClass    string `json:"device-class"`
	// BlockPath is the logical volume path for an OSD created by ceph-volume, with the format
	// '/dev/<Volume Group>/<Logical Volume>', or simply the device path (e.g. /dev/vdb) when raw (block) mode is used
	BlockPath     string `json:"lv-path"`
	MetadataPath  string `json:"metadata-path"`
	WalPath       string `json:"wal-path"`
	SkipLVRelease bool   `json:"skip-lv-release"`
	Location      string `json:"location"`
	LVBackedPV    bool   `json:"lv-backed-pv"`
	CVMode        string `json:"lv-mode"`
	Store         string `json:"store"`
	// Ensure the OSD daemon has affinity with the same topology from the OSD prepare pod
	TopologyAffinity string `json:"topologyAffinity"`
}

OSDInfo represents all the properties of a given OSD
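
The JSON tags suggest how OSD properties are serialized; a sketch, with invented values, of decoding one entry. The import path is assumed.

package main

import (
	"encoding/json"
	"fmt"

	osd "github.com/rook/rook/pkg/operator/ceph/cluster/osd" // assumed import path
)

func main() {
	// Invented example payload shaped by the struct's JSON tags.
	raw := []byte(`{"id":0,"cluster":"rook-ceph","uuid":"abc123","lv-path":"/dev/vdb","lv-mode":"raw","store":"bluestore"}`)
	var info osd.OSDInfo
	if err := json.Unmarshal(raw, &info); err != nil {
		panic(err)
	}
	fmt.Printf("osd.%d on %s (mode %s)\n", info.ID, info.BlockPath, info.CVMode)
}
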

type OrchestrationStatus

type OrchestrationStatus struct {
	OSDs         []OSDInfo `json:"osds"`
	Status       string    `json:"status"`
	PvcBackedOSD bool      `json:"pvc-backed-osd"`
	Message      string    `json:"message"`
}

OrchestrationStatus represents the status of an OSD orchestration

Directories

Path	Synopsis
config	Package config for OSD config managed by the operator
