controller

package
v0.8.0 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Mar 10, 2024 License: Apache-2.0 Imports: 25 Imported by: 0

Documentation

Index

Constants

View Source
const (
	// CurrentHdfsVersion is the HDFS release these defaults are taken from.
	// hdfs-site k/v list: https://hadoop.apache.org/docs/r3.3.6/hadoop-project-dist/hadoop-hdfs/hdfs-default.xml
	// core-site k/v list: https://hadoop.apache.org/docs/r3.3.6/hadoop-project-dist/hadoop-common/core-default.xml
	CurrentHdfsVersion = "v3.3.6"
	// DefaultNameSuffix is the default name suffix of the resources of the hdfs
	DefaultNameSuffix = "-hdfs"

	// DefaultClusterSign is the default cluster sign of the hdfs
	DefaultClusterSign = "hdfs"

	// DefaultStorageClass is the default storage class of the hdfs
	DefaultStorageClass = "nineinfra-default"

	// DefaultNameService is the default name service for hdfs
	DefaultNameService = "nineinfra"

	// DefaultWebUIUser is the default web ui user for hdfs
	DefaultWebUIUser = "root"

	// DefaultKyuubiUser is the default kyuubi user for hdfs
	DefaultKyuubiUser = "kyuubi"

	// DefaultHiveUser is the default hive user for hdfs
	DefaultHiveUser = "hive"

	// DefaultQuorumReplicas is the default quorum replicas (journalnode quorum)
	DefaultQuorumReplicas = 3

	// DefaultHaReplicas is the default ha replicas for namenode
	DefaultHaReplicas = 2

	// DefaultReplicas is the default replicas for namenode and httpfs
	DefaultReplicas = 1

	// DefaultDiskNum is the default disk num per datanode
	DefaultDiskNum = 1

	// DefaultMaxWaitSeconds is the default max wait seconds
	DefaultMaxWaitSeconds = 600

	// DefaultClusterDomainName is the default domain name key for the k8s cluster
	DefaultClusterDomainName = "clusterDomain"

	// DefaultClusterDomain is the default domain name value for the k8s cluster
	DefaultClusterDomain = "cluster.local"

	// DefaultLogVolumeName is the default log volume name
	DefaultLogVolumeName = "log"

	// DefaultConfigNameSuffix is the default config name suffix
	DefaultConfigNameSuffix = "config"

	// DefaultHeadlessSvcNameSuffix is the default headless service suffix
	DefaultHeadlessSvcNameSuffix = "headless"

	// DefaultCoreSiteFile is the default core site file name
	DefaultCoreSiteFile = "core-site.xml"

	// DefaultHdfsSiteFile is the default hdfs site file name
	DefaultHdfsSiteFile = "hdfs-site.xml"

	// DefaultHttpFSSiteFile is the default httpfs site file name
	DefaultHttpFSSiteFile = "httpfs-site.xml"

	// DefaultTerminationGracePeriod is the default time given before the
	// container is stopped. This gives clients time to disconnect from a
	// specific node gracefully.
	DefaultTerminationGracePeriod = 30

	// DefaultHdfsVolumeSize is the default volume size for the
	// Hdfs data volume
	DefaultHdfsVolumeSize = "100Gi"

	// DefaultHdfsLogVolumeSize is the default volume size for the
	// Hdfs log volume
	DefaultHdfsLogVolumeSize = "5Gi"

	// DefaultIOBufferSize is the default size for the
	// io file buffer (bytes, kept as a string for direct use in xml config)
	DefaultIOBufferSize = "131072"

	// DefaultReadinessProbeInitialDelaySeconds is the default initial delay (in seconds)
	// for the readiness probe
	DefaultReadinessProbeInitialDelaySeconds = 40

	// DefaultReadinessProbePeriodSeconds is the default probe period (in seconds)
	// for the readiness probe
	DefaultReadinessProbePeriodSeconds = 10

	// DefaultReadinessProbeFailureThreshold is the default probe failure threshold
	// for the readiness probe
	DefaultReadinessProbeFailureThreshold = 60

	// DefaultReadinessProbeSuccessThreshold is the default probe success threshold
	// for the readiness probe
	DefaultReadinessProbeSuccessThreshold = 1

	// DefaultReadinessProbeTimeoutSeconds is the default probe timeout (in seconds)
	// for the readiness probe
	DefaultReadinessProbeTimeoutSeconds = 10

	// DefaultLivenessProbeInitialDelaySeconds is the default initial delay (in seconds)
	// for the liveness probe
	DefaultLivenessProbeInitialDelaySeconds = 40

	// DefaultLivenessProbePeriodSeconds is the default probe period (in seconds)
	// for the liveness probe
	DefaultLivenessProbePeriodSeconds = 10

	// DefaultLivenessProbeFailureThreshold is the default probe failure threshold
	// for the liveness probe
	DefaultLivenessProbeFailureThreshold = 60

	// DefaultLivenessProbeSuccessThreshold is the default probe success threshold
	// for the liveness probe
	DefaultLivenessProbeSuccessThreshold = 1

	// DefaultLivenessProbeTimeoutSeconds is the default probe timeout (in seconds)
	// for the liveness probe
	DefaultLivenessProbeTimeoutSeconds = 10

	// HdfsProbeTypeLiveness identifies the liveness type probe
	HdfsProbeTypeLiveness = "liveness"

	// HdfsProbeTypeReadiness identifies the readiness type probe
	HdfsProbeTypeReadiness = "readiness"
)
View Source
const (
	// Role identifiers for each HDFS cluster component.
	HdfsRoleNameNode    = "namenode"
	HdfsRoleDataNode    = "datanode"
	HdfsRoleJournalNode = "journalnode"
	HdfsRoleHttpFS      = "httpfs"
	HdfsRoleAll         = "hdfs"
	// HdfsHomeDir is the Hadoop installation root inside the container.
	HdfsHomeDir         = "/opt/hadoop"
	//HdfsConfDir               = HdfsHomeDir + "/conf"
	HdfsConfDir                  = HdfsHomeDir + "/etc/hadoop"
	HdfsDataPath                 = HdfsHomeDir + "/data"
	HdfsLogsDir                  = HdfsHomeDir + "/logs"
	HttpFSTempDir                = HdfsHomeDir + "/temp"
	HdfsDiskPathPrefix           = "disk"
	// Default (template) configuration files shipped with the operator,
	// versioned by CurrentHdfsVersion.
	CoreSiteDefaultConfFile      = "/hdfs/" + CurrentHdfsVersion + "/" + "core-site.xml.default"
	HdfsSiteDefaultConfFile      = "/hdfs/" + CurrentHdfsVersion + "/" + "hdfs-site.xml.default"
	HttpFSSiteDefaultConfFile    = "/hdfs/" + CurrentHdfsVersion + "/" + "httpfs-site.xml.default"
	// DefaultHdfsParentZnodePrefix is the ZooKeeper znode prefix used for HA failover state.
	DefaultHdfsParentZnodePrefix = "/hadoop-ha/"
)

Variables

View Source
var (
	// Cluster topology values kept as strings so they can be injected
	// directly into container environments / config templates. They are
	// populated at runtime by the Fill*Envs helpers (FillJNEnvs, FillNNEnvs,
	// FillZKEnvs). NOTE(review): the ALL_CAPS names are not idiomatic Go,
	// but they are exported and so kept for backward compatibility.
	HDFS_HA        = "false"
	NN0_NODE       = ""
	NN1_NODE       = ""
	NN_RPC_PORT    = "8020"
	JN_NODES       = ""
	JN_RPC_PORT    = "8485"
	ZK_NODES       = ""
	ZK_CLIENT_PORT = "2181"
)
View Source
// CustomizableCoreSiteConfKeyPrefixs lists core-site.xml key prefixes that
// users are allowed to override. (Spelling "Prefixs" kept: exported name.)
var CustomizableCoreSiteConfKeyPrefixs = []string{
	"hadoop.security.crypto.codec.classes",
}
View Source
// CustomizableHdfsSiteConfKeyPrefixs lists hdfs-site.xml key prefixes that
// users are allowed to override. (Spelling "Prefixs" kept: exported name.)
var CustomizableHdfsSiteConfKeyPrefixs = []string{
	"dfs.ha.namenodes",
	"dfs.namenode.rpc-address",
	"dfs.client.failover.proxy.provider",
}
View Source
// DefaultNamedPort maps a short role-port name (e.g. "nn-rpc") to its default
// container port number. Prefixes: jn=journalnode, nn=namenode, dn=datanode,
// hf=httpfs. See DefaultNamedPortConfKey for the matching Hadoop config keys.
var DefaultNamedPort = map[string]int32{
	"jn-rpc":   8485,
	"jn-http":  8480,
	"jn-https": 8481,
	"nn-rpc":   8020,
	"nn-http":  9870,
	"nn-https": 9871,

	"dn-http":  9864,
	"dn-rpc":   9867,
	"dn-addr":  9866,
	"dn-https": 9865,
	"hf-http":  14000,
}
View Source
// DefaultNamedPortConfKey maps a short role-port name to the Hadoop
// configuration property that sets its address. Keys mirror DefaultNamedPort.
var DefaultNamedPortConfKey = map[string]string{
	"jn-rpc":   "dfs.journalnode.rpc-address",
	"jn-http":  "dfs.journalnode.http-address",
	"jn-https": "dfs.journalnode.https-address",
	"nn-rpc":   "dfs.namenode.rpc-address",
	"nn-http":  "dfs.namenode.http-address",
	"nn-https": "dfs.namenode.https-address",

	"dn-http":  "dfs.datanode.http.address",
	"dn-rpc":   "dfs.datanode.ipc.address",
	"dn-addr":  "dfs.datanode.address",
	"dn-https": "dfs.datanode.https.address",
	"hf-http":  "httpfs.http.port",
}
View Source
// HDFSRole2Prefix maps a full role name (see HdfsRole* constants) to the
// two-letter prefix used in the named-port maps above.
var HDFSRole2Prefix = map[string]string{
	"namenode":    "nn",
	"datanode":    "dn",
	"journalnode": "jn",
	"httpfs":      "hf",
}

Functions

func CheckHdfsHA

func CheckHdfsHA(cluster *hdfsv1.HdfsCluster) bool

func ClusterResourceLabels

func ClusterResourceLabels(cluster *hdfsv1.HdfsCluster, role string) map[string]string

func ClusterResourceName

func ClusterResourceName(cluster *hdfsv1.HdfsCluster, suffixs ...string) string

func DefaultEnvVars

func DefaultEnvVars(role string) []corev1.EnvVar

func DefaultXml2Map

func DefaultXml2Map() (map[string]string, map[string]string, map[string]string, error)

func FillJNEnvs

func FillJNEnvs(qjournal string)

func FillNNEnvs

func FillNNEnvs(hdfsSite map[string]string)

func FillZKEnvs

func FillZKEnvs(zkEndpoints string, zkReplicas int)

func GetClusterDomain

func GetClusterDomain(cluster *hdfsv1.HdfsCluster) string

func GetRefZookeeperInfo

func GetRefZookeeperInfo(cluster *hdfsv1.HdfsCluster) (int, string, error)

func GetReplicas

func GetReplicas(cluster *hdfsv1.HdfsCluster, role string) int32

func GetStorageClassName

func GetStorageClassName(cluster *hdfsv1.HdfsCluster) string

func LogInfoInterval

func LogInfoInterval(ctx context.Context, interval int, msg string)

Types

type HdfsClusterReconciler

// HdfsClusterReconciler reconciles a HdfsCluster object.
type HdfsClusterReconciler struct {
	client.Client
	// Scheme is the runtime scheme used to map Go types to Kubernetes kinds.
	Scheme *runtime.Scheme
}

HdfsClusterReconciler reconciles a HdfsCluster object

func (*HdfsClusterReconciler) Reconcile

func (r *HdfsClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error)

Reconcile is part of the main kubernetes reconciliation loop which aims to move the current state of the cluster closer to the desired state. TODO(user): Modify the Reconcile function to compare the state specified by the HdfsCluster object against the actual cluster state, and then perform operations to make the cluster state reflect the state specified by the user.

For more details, check Reconcile and its Result here: - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.16.0/pkg/reconcile

func (*HdfsClusterReconciler) SetupWithManager

func (r *HdfsClusterReconciler) SetupWithManager(mgr ctrl.Manager) error

SetupWithManager sets up the controller with the Manager.

type XmlConfiguration

// XmlConfiguration models a Hadoop *-site.xml document: a <configuration>
// root element containing a list of <property> entries.
type XmlConfiguration struct {
	XmlName    xml.Name      `xml:"configuration"`
	Properties []XmlProperty `xml:"property"`
}

type XmlProperty

// XmlProperty is a single <property> entry (name/value pair) in a Hadoop
// *-site.xml configuration file.
type XmlProperty struct {
	Name  string `xml:"name"`
	Value string `xml:"value"`
}

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL