v1alpha2

package
Version: v0.44.0
Published: Mar 16, 2024 License: Apache-2.0 Imports: 50 Imported by: 102

Documentation

Overview

+k8s:deepcopy-gen=package,register +k8s:openapi-gen=true +k8s:defaulter-gen=TypeMeta +groupName=kubedb.com

nolint:goconst

Index

Constants

View Source
const (
	// Deprecated
	DatabaseNamePrefix = "kubedb"

	KubeDBOrganization = "kubedb"

	LabelRole = kubedb.GroupName + "/role"

	ReplicationModeDetectorContainerName = "replication-mode-detector"
	DatabasePodPrimary                   = "primary"
	DatabasePodStandby                   = "standby"

	ComponentDatabase         = "database"
	ComponentConnectionPooler = "connection-pooler"
	RoleStats                 = "stats"
	DefaultStatsPath          = "/metrics"
	DefaultPasswordLength     = 16
	HealthCheckInterval       = 10 * time.Second

	ContainerExporterName = "exporter"
	LocalHost             = "localhost"
	LocalHostIP           = "127.0.0.1"

	DBCustomConfigName             = "custom-config"
	DefaultVolumeClaimTemplateName = "data"

	DBTLSVolume         = "tls-volume"
	DBExporterTLSVolume = "exporter-tls-volume"

	CACert = "ca.crt"

	// =========================== Database key Constants ============================
	PostgresKey      = ResourceSingularPostgres + "." + kubedb.GroupName
	ElasticsearchKey = ResourceSingularElasticsearch + "." + kubedb.GroupName
	MySQLKey         = ResourceSingularMySQL + "." + kubedb.GroupName
	MariaDBKey       = ResourceSingularMariaDB + "." + kubedb.GroupName
	PerconaXtraDBKey = ResourceSingularPerconaXtraDB + "." + kubedb.GroupName
	MongoDBKey       = ResourceSingularMongoDB + "." + kubedb.GroupName
	RedisKey         = ResourceSingularRedis + "." + kubedb.GroupName
	MemcachedKey     = ResourceSingularMemcached + "." + kubedb.GroupName
	EtcdKey          = ResourceSingularEtcd + "." + kubedb.GroupName
	ProxySQLKey      = ResourceSingularProxySQL + "." + kubedb.GroupName

	// =========================== Elasticsearch Constants ============================
	ElasticsearchRestPort                        = 9200
	ElasticsearchRestPortName                    = "http"
	ElasticsearchTransportPort                   = 9300
	ElasticsearchTransportPortName               = "transport"
	ElasticsearchPerformanceAnalyzerPort         = 9600
	ElasticsearchPerformanceAnalyzerPortName     = "analyzer"
	ElasticsearchNodeRoleSet                     = "set"
	ElasticsearchConfigDir                       = "/usr/share/elasticsearch/config"
	ElasticsearchOpenSearchConfigDir             = "/usr/share/opensearch/config"
	ElasticsearchSecureSettingsDir               = "/elasticsearch/secure-settings"
	ElasticsearchTempConfigDir                   = "/elasticsearch/temp-config"
	ElasticsearchCustomConfigDir                 = "/elasticsearch/custom-config"
	ElasticsearchDataDir                         = "/usr/share/elasticsearch/data"
	ElasticsearchOpenSearchDataDir               = "/usr/share/opensearch/data"
	ElasticsearchTempDir                         = "/tmp"
	ElasticsearchOpendistroSecurityConfigDir     = "/usr/share/elasticsearch/plugins/opendistro_security/securityconfig"
	ElasticsearchOpenSearchSecurityConfigDir     = "/usr/share/opensearch/plugins/opensearch-security/securityconfig"
	ElasticsearchOpenSearchSecurityConfigDirV2   = "/usr/share/opensearch/config/opensearch-security"
	ElasticsearchSearchGuardSecurityConfigDir    = "/usr/share/elasticsearch/plugins/search-guard-%v/sgconfig"
	ElasticsearchOpendistroReadallMonitorRole    = "readall_and_monitor"
	ElasticsearchOpenSearchReadallMonitorRole    = "readall_and_monitor"
	ElasticsearchSearchGuardReadallMonitorRoleV7 = "SGS_READALL_AND_MONITOR"
	ElasticsearchSearchGuardReadallMonitorRoleV6 = "sg_readall_and_monitor"
	ElasticsearchStatusGreen                     = "green"
	ElasticsearchStatusYellow                    = "yellow"
	ElasticsearchStatusRed                       = "red"
	ElasticsearchInitSysctlContainerName         = "init-sysctl"
	ElasticsearchInitConfigMergerContainerName   = "config-merger"
	ElasticsearchContainerName                   = "elasticsearch"
	ElasticsearchExporterContainerName           = "exporter"
	ElasticsearchSearchGuardRolesMappingFileName = "sg_roles_mapping.yml"
	ElasticsearchSearchGuardInternalUserFileName = "sg_internal_users.yml"
	ElasticsearchOpendistroRolesMappingFileName  = "roles_mapping.yml"
	ElasticsearchOpendistroInternalUserFileName  = "internal_users.yml"
	ElasticsearchJavaOptsEnv                     = "ES_JAVA_OPTS"
	ElasticsearchOpenSearchJavaOptsEnv           = "OPENSEARCH_JAVA_OPTS"
	ElasticsearchVolumeConfig                    = "esconfig"
	ElasticsearchVolumeTempConfig                = "temp-config"
	ElasticsearchVolumeSecurityConfig            = "security-config"
	ElasticsearchVolumeSecureSettings            = "secure-settings"
	ElasticsearchVolumeCustomConfig              = "custom-config"
	ElasticsearchVolumeData                      = "data"
	ElasticsearchVolumeTemp                      = "temp"

	// Ref:
	//	- https://www.elastic.co/guide/en/elasticsearch/reference/7.6/heap-size.html#heap-size
	//	- no more than 50% of your physical RAM
	//	- no more than the ~32GB threshold at which the JVM stops using compressed object pointers (compressed oops)
	//	- no more than 26GB to keep zero-based compressed oops
	// 26GB is safe on most systems
	ElasticsearchMaxHeapSize = 26 * 1024 * 1024 * 1024
	// 128MB
	ElasticsearchMinHeapSize = 128 * 1024 * 1024

	// =========================== Memcached Constants ============================
	MemcachedDatabasePortName       = "db"
	MemcachedPrimaryServicePortName = "primary"
	MemcachedDatabasePort           = 11211

	// =========================== MongoDB Constants ============================
	MongoDBDatabasePortName       = "db"
	MongoDBPrimaryServicePortName = "primary"
	MongoDBDatabasePort           = 27017
	MongoDBKeyFileSecretSuffix    = "-key"
	MongoDBRootUsername           = "root"
	MongoDBCustomConfigFile       = "mongod.conf"
	MongoDBReplicaSetConfig       = "replicaset.json"
	MongoDBConfigurationJSFile    = "configuration.js"
	NodeTypeMongos                = "mongos"
	NodeTypeShard                 = "shard"
	NodeTypeConfig                = "configsvr"
	NodeTypeArbiter               = "arbiter"
	NodeTypeHidden                = "hidden"
	NodeTypeReplica               = "replica"
	NodeTypeStandalone            = "standalone"

	MongoDBWorkDirectoryName = "workdir"
	MongoDBWorkDirectoryPath = "/work-dir"

	MongoDBCertDirectoryName = "certdir"

	MongoDBDataDirectoryName = "datadir"
	MongoDBDataDirectoryPath = "/data/db"

	MongoDBInitInstallContainerName   = "copy-config"
	MongoDBInitBootstrapContainerName = "bootstrap"

	MongoDBConfigDirectoryName = "config"
	MongoDBConfigDirectoryPath = "/data/configdb"

	MongoDBInitialConfigDirectoryName = "configdir"
	MongoDBInitialConfigDirectoryPath = "/configdb-readonly"

	MongoDBInitScriptDirectoryName = "init-scripts"
	MongoDBInitScriptDirectoryPath = "/init-scripts"

	MongoDBInitialDirectoryName = "initial-script"
	MongoDBInitialDirectoryPath = "/docker-entrypoint-initdb.d"

	MongoDBClientCertDirectoryName = "client-cert"
	MongoDBClientCertDirectoryPath = "/client-cert"

	MongoDBServerCertDirectoryName = "server-cert"
	MongoDBServerCertDirectoryPath = "/server-cert"

	MongoDBInitialKeyDirectoryName = "keydir"
	MongoDBInitialKeyDirectoryPath = "/keydir-readonly"

	MongoDBContainerName = ResourceSingularMongoDB

	MongoDBDefaultVolumeClaimTemplateName = MongoDBDataDirectoryName

	MongodbUser             = "root"
	MongoDBKeyForKeyFile    = "key.txt"
	MongoDBAuthSecretSuffix = "-auth"

	// =========================== MySQL Constants ============================
	MySQLMetricsExporterConfigSecretSuffix = "metrics-exporter-config"
	MySQLDatabasePortName                  = "db"
	MySQLRouterReadWritePortName           = "rw"
	MySQLRouterReadOnlyPortName            = "ro"
	MySQLPrimaryServicePortName            = "primary"
	MySQLStandbyServicePortName            = "standby"
	MySQLDatabasePort                      = 3306
	MySQLRouterReadWritePort               = 6446
	MySQLRouterReadOnlyPort                = 6447

	MySQLCoordinatorClientPort = 2379
	MySQLCoordinatorPort       = 2380
	MySQLCoordinatorStatus     = "Coordinator/Status"

	MySQLGroupComPort    = 33060
	MySQLMaxGroupMembers = 9
	// The recommended MySQL server version for group replication (GR)
	MySQLGRRecommendedVersion = "8.0.23"
	MySQLDefaultGroupSize     = 3
	MySQLRootUserName         = "MYSQL_ROOT_USERNAME"
	MySQLRootPassword         = "MYSQL_ROOT_PASSWORD"
	MySQLName                 = "MYSQL_NAME"
	MySQLRootUser             = "root"

	MySQLTLSConfigCustom     = "custom"
	MySQLTLSConfigSkipVerify = "skip-verify"
	MySQLTLSConfigTrue       = "true"
	MySQLTLSConfigFalse      = "false"
	MySQLTLSConfigPreferred  = "preferred"

	MySQLContainerName            = "mysql"
	MySQLRouterContainerName      = "mysql-router"
	MySQLRouterInitContainerName  = "mysql-router-init"
	MySQLCoordinatorContainerName = "mysql-coordinator"
	MySQLInitContainerName        = "mysql-init"

	MySQLRouterInitScriptDirectoryName = "init-scripts"
	MySQLRouterInitScriptDirectoryPath = "/scripts"
	MySQLRouterConfigDirectoryName     = "router-config-secret"
	MySQLRouterConfigDirectoryPath     = "/etc/mysqlrouter"
	MySQLRouterTLSDirectoryName        = "router-tls-volume"
	MySQLRouterTLSDirectoryPath        = "/etc/mysql/certs"
	MySQLReplicationUser               = "repl"

	MySQLComponentKey    = MySQLKey + "/component"
	MySQLComponentDB     = "database"
	MySQLComponentRouter = "router"

	MySQLVolumeNameTemp      = "tmp"
	MySQLVolumeMountPathTemp = "/tmp"

	MySQLVolumeNameData      = "data"
	MySQLVolumeMountPathData = "/var/lib/mysql"

	MySQLVolumeNameUserInitScript      = "initial-script"
	MySQLVolumeMountPathUserInitScript = "/docker-entrypoint-initdb.d"

	MySQLVolumeNameInitScript      = "init-scripts"
	MySQLVolumeMountPathInitScript = "/scripts"

	MySQLVolumeNameCustomConfig      = "custom-config"
	MySQLVolumeMountPathCustomConfig = "/etc/mysql/conf.d"

	MySQLVolumeNameTLS      = "tls-volume"
	MySQLVolumeMountPathTLS = "/etc/mysql/certs"

	MySQLVolumeNameExporterTLS      = "exporter-tls-volume"
	MySQLVolumeMountPathExporterTLS = "/etc/mysql/certs"

	MySQLVolumeNameSourceCA      = "source-ca"
	MySQLVolumeMountPathSourceCA = "/etc/mysql/server/certs"

	// =========================== PerconaXtraDB Constants ============================
	PerconaXtraDBClusterRecommendedVersion     = "5.7"
	PerconaXtraDBMaxClusterNameLength          = 32
	PerconaXtraDBStandaloneReplicas            = 1
	PerconaXtraDBDefaultClusterSize            = 3
	PerconaXtraDBDataMountPath                 = "/var/lib/mysql"
	PerconaXtraDBDataLostFoundPath             = PerconaXtraDBDataMountPath + "/lost+found"
	PerconaXtraDBInitDBVolumeName              = "initial-script"
	PerconaXtraDBInitDBMountPath               = "/docker-entrypoint-initdb.d"
	PerconaXtraDBCustomConfigMountPath         = "/etc/percona-server.conf.d/"
	PerconaXtraDBClusterCustomConfigMountPath  = "/etc/mysql/custom.conf.d/"
	PerconaXtraDBCustomConfigVolumeName        = "custom-config"
	PerconaXtraDBTLSConfigCustom               = "custom"
	PerconaXtraDBInitContainerName             = "px-init"
	PerconaXtraDBCoordinatorContainerName      = "px-coordinator"
	PerconaXtraDBRunScriptVolumeName           = "run-script"
	PerconaXtraDBRunScriptVolumeMountPath      = "/run-script"
	PerconaXtraDBInitScriptVolumeName          = "init-scripts"
	PerconaXtraDBInitScriptVolumeMountPath     = "/scripts"
	PerconaXtraDBContainerName                 = ResourceSingularPerconaXtraDB
	PerconaXtraDBCertMountPath                 = "/etc/mysql/certs"
	PerconaXtraDBExporterConfigFileName        = "exporter.cnf"
	PerconaXtraDBGaleraClusterPrimaryComponent = "Primary"
	PerconaXtraDBServerTLSVolumeName           = "tls-server-config"
	PerconaXtraDBClientTLSVolumeName           = "tls-client-config"
	PerconaXtraDBExporterTLSVolumeName         = "tls-metrics-exporter-config"
	PerconaXtraDBMetricsExporterTLSVolumeName  = "metrics-exporter-config"
	PerconaXtraDBMetricsExporterConfigPath     = "/etc/mysql/config/exporter"
	PerconaXtraDBDataVolumeName                = "data"
	PerconaXtraDBMySQLUserGroupID              = 1001

	// =========================== MariaDB Constants ============================
	MariaDBMaxClusterNameLength          = 32
	MariaDBStandaloneReplicas            = 1
	MariaDBDefaultClusterSize            = 3
	MariaDBDataMountPath                 = "/var/lib/mysql"
	MariaDBDataLostFoundPath             = MariaDBDataMountPath + "/lost+found"
	MariaDBInitDBVolumeName              = "initial-script"
	MariaDBInitDBMountPath               = "/docker-entrypoint-initdb.d"
	MariaDBCustomConfigMountPath         = "/etc/mysql/conf.d/"
	MariaDBClusterCustomConfigMountPath  = "/etc/mysql/custom.conf.d/"
	MariaDBCustomConfigVolumeName        = "custom-config"
	MariaDBTLSConfigCustom               = "custom"
	MariaDBInitContainerName             = "mariadb-init"
	MariaDBCoordinatorContainerName      = "md-coordinator"
	MariaDBRunScriptVolumeName           = "run-script"
	MariaDBRunScriptVolumeMountPath      = "/run-script"
	MariaDBInitScriptVolumeName          = "init-scripts"
	MariaDBInitScriptVolumeMountPath     = "/scripts"
	MariaDBContainerName                 = ResourceSingularMariaDB
	MariaDBCertMountPath                 = "/etc/mysql/certs"
	MariaDBExporterConfigFileName        = "exporter.cnf"
	MariaDBGaleraClusterPrimaryComponent = "Primary"
	MariaDBServerTLSVolumeName           = "tls-server-config"
	MariaDBClientTLSVolumeName           = "tls-client-config"
	MariaDBExporterTLSVolumeName         = "tls-metrics-exporter-config"
	MariaDBMetricsExporterTLSVolumeName  = "metrics-exporter-config"
	MariaDBMetricsExporterConfigPath     = "/etc/mysql/config/exporter"
	MariaDBDataVolumeName                = "data"

	// =========================== SingleStore Constants ============================
	SinglestoreDatabasePortName       = "db"
	SinglestorePrimaryServicePortName = "primary"
	SinglestoreStudioPortName         = "studio"
	SinglestoreDatabasePort           = 3306
	SinglestoreStudioPort             = 8081
	SinglestoreExporterPort           = 9104
	SinglestoreRootUserName           = "ROOT_USERNAME"
	SinglestoreRootPassword           = "ROOT_PASSWORD"
	SinglestoreRootUser               = "root"
	DatabasePodMaster                 = "Master"
	DatabasePodAggregator             = "Aggregator"
	DatabasePodLeaf                   = "Leaf"
	PetSetTypeAggregator              = "aggregator"
	PetSetTypeLeaf                    = "leaf"
	SinglestoreDatabaseHealth         = "singlestore_health"
	SinglestoreTableHealth            = "singlestore_health_table"

	SinglestoreCoordinatorContainerName = "singlestore-coordinator"
	SinglestoreContainerName            = "singlestore"
	SinglestoreInitContainerName        = "singlestore-init"

	SinglestoreVolumeNameUserInitScript      = "initial-script"
	SinglestoreVolumeMountPathUserInitScript = "/docker-entrypoint-initdb.d"
	SinglestoreVolumeNameCustomConfig        = "custom-config"
	SinglestoreVolumeMountPathCustomConfig   = "/config"
	SinglestoreVolmeNameInitScript           = "init-scripts"
	SinglestoreVolumeMountPathInitScript     = "/scripts"
	SinglestoreVolumeNameData                = "data"
	SinglestoreVolumeMountPathData           = "/var/lib/memsql"

	// =========================== PostgreSQL Constants ============================
	PostgresDatabasePortName          = "db"
	PostgresPrimaryServicePortName    = "primary"
	PostgresStandbyServicePortName    = "standby"
	PostgresDatabasePort              = 5432
	PostgresPodPrimary                = "primary"
	PostgresPodStandby                = "standby"
	EnvPostgresUser                   = "POSTGRES_USER"
	EnvPostgresPassword               = "POSTGRES_PASSWORD"
	PostgresRootUser                  = "postgres"
	PostgresCoordinatorContainerName  = "pg-coordinator"
	PostgresCoordinatorPort           = 2380
	PostgresCoordinatorPortName       = "coordinator"
	PostgresContainerName             = ResourceSingularPostgres
	PostgresInitContainerName         = "postgres-init-container"
	PostgresCoordinatorClientPort     = 2379
	PostgresCoordinatorClientPortName = "coordinatclient"

	RaftMetricsExporterPort     = 23790
	RaftMetricsExporterPortName = "raft-metrics"

	PostgresInitVolumeName           = "initial-script"
	PostgresInitDir                  = "/var/initdb"
	PostgresSharedMemoryVolumeName   = "shared-memory"
	PostgresSharedMemoryDir          = "/dev/shm"
	PostgresDataVolumeName           = "data"
	PostgresDataDir                  = "/var/pv"
	PostgresCustomConfigVolumeName   = "custom-config"
	PostgresCustomConfigDir          = "/etc/config"
	PostgresRunScriptsVolumeName     = "run-scripts"
	PostgresRunScriptsDir            = "/run_scripts"
	PostgresRoleScriptsVolumeName    = "role-scripts"
	PostgresRoleScriptsDir           = "/role_scripts"
	PostgresSharedScriptsVolumeName  = "scripts"
	PostgresSharedScriptsDir         = "/scripts"
	PostgresSharedTlsVolumeName      = "certs"
	PostgresSharedTlsVolumeMountPath = "/tls/certs"

	PostgresKeyFileSecretSuffix = "key"
	PostgresPEMSecretSuffix     = "pem"
	PostgresDefaultUsername     = "postgres"
	PostgresPgCoordinatorStatus = "Coordinator/Status"
	// Pause tells pg-coordinator to pause failover for Postgres; useful during ops requests
	PostgresPgCoordinatorStatusPause = "Pause"
	// Resume tells pg-coordinator to resume failover for Postgres; useful during ops requests
	PostgresPgCoordinatorStatusResume = "Resume"

	// NonTransferableResume resumes pg-coordinator in non-transferable mode.
	// It is used when a specific node has been made primary and no other node should take over the primary role.
	PostgresPgCoordinatorStatusResumeNonTransferable = "NonTransferableResume"

	SharedBuffersGbAsByte = 1024 * 1024 * 1024
	SharedBuffersMbAsByte = 1024 * 1024

	SharedBuffersGbAsKiloByte = 1024 * 1024
	SharedBuffersMbAsKiloByte = 1024
	IPS_LOCK                  = "IPC_LOCK"
	SYS_RESOURCE              = "SYS_RESOURCE"
	DropCapabilityALL         = "ALL"

	// =========================== ProxySQL Constants ============================
	LabelProxySQLName                  = ProxySQLKey + "/name"
	LabelProxySQLLoadBalance           = ProxySQLKey + "/load-balance"
	LabelProxySQLLoadBalanceStandalone = "Standalone"

	ProxySQLContainerName          = ResourceSingularProxySQL
	ProxySQLDatabasePort           = 6033
	ProxySQLDatabasePortName       = "db"
	ProxySQLPrimaryServicePortName = "db"
	ProxySQLAdminPort              = 6032
	ProxySQLAdminPortName          = "admin"
	ProxySQLDataMountPath          = "/var/lib/proxysql"
	ProxySQLCustomConfigMountPath  = "/etc/custom-config"

	ProxySQLBackendSSLMountPath  = "/var/lib/certs"
	ProxySQLFrontendSSLMountPath = "/var/lib/frontend"
	ProxySQLClusterAdmin         = "cluster"
	ProxySQLClusterPasswordField = "cluster_password"
	ProxySQLTLSConfigCustom      = "custom"
	ProxySQLTLSConfigSkipVerify  = "skip-verify"

	ProxySQLMonitorUsername = "proxysql"
	ProxySQLAuthUsername    = "cluster"
	ProxySQLConfigSecretKey = "proxysql.cnf"

	// =========================== Redis Constants ============================
	RedisConfigKey = "redis.conf" // RedisConfigKey is the key used for the custom Redis configuration
	// DefaultConfigKey is the key used for the default Redis configuration
	RedisContainerName             = ResourceSingularRedis
	RedisSentinelContainerName     = "redissentinel"
	DefaultConfigKey               = "default.conf"
	RedisShardKey                  = RedisKey + "/shard"
	RedisDatabasePortName          = "db"
	RedisPrimaryServicePortName    = "primary"
	RedisDatabasePort              = 6379
	RedisSentinelPort              = 26379
	RedisGossipPortName            = "gossip"
	RedisGossipPort                = 16379
	RedisSentinelPortName          = "sentinel"
	RedisInitContainerName         = "redis-init"
	RedisCoordinatorContainerName  = "rd-coordinator"
	RedisSentinelInitContainerName = "sentinel-init"

	RedisScriptVolumeName      = "script-vol"
	RedisScriptVolumePath      = "/scripts"
	RedisDataVolumeName        = "data"
	RedisDataVolumePath        = "/data"
	RedisTLSVolumeName         = "tls-volume"
	RedisExporterTLSVolumeName = "exporter-tls-volume"
	RedisTLSVolumePath         = "/certs"
	RedisSentinelTLSVolumeName = "sentinel-tls-volume"
	RedisSentinelTLSVolumePath = "/sentinel-certs"
	RedisConfigVolumeName      = "redis-config"
	RedisConfigVolumePath      = "/usr/local/etc/redis/"
	RedisInitVolumeName        = "init-volume"
	RedisInitVolumePath        = "/init"

	RedisNodeFlagMaster = "master"
	RedisNodeFlagNoAddr = "noaddr"
	RedisNodeFlagSlave  = "slave"

	RedisKeyFileSecretSuffix = "key"
	RedisPEMSecretSuffix     = "pem"
	RedisRootUsername        = "default"

	EnvRedisUser              = "USERNAME"
	EnvRedisPassword          = "REDISCLI_AUTH"
	EnvRedisMode              = "REDIS_MODE"
	EnvRedisMajorRedisVersion = "MAJOR_REDIS_VERSION"

	// =========================== PgBouncer Constants ============================
	PgBouncerUpstreamServerCA               = "upstream-server-ca.crt"
	PgBouncerUpstreamServerClientCert       = "upstream-server-client.crt"
	PgBouncerUpstreamServerClientKey        = "upstream-server-client.key"
	PgBouncerClientCrt                      = "client.crt"
	PgBouncerClientKey                      = "client.key"
	PgBouncerCACrt                          = "ca.crt"
	PgBouncerTLSCrt                         = "tls.crt"
	PgBouncerTLSKey                         = "tls.key"
	PgBouncerDatabasePortName               = "db"
	PgBouncerPrimaryServicePortName         = "primary"
	PgBouncerDatabasePort                   = 5432
	PgBouncerConfigFile                     = "pgbouncer.ini"
	PgBouncerAdminUsername                  = "pgbouncer"
	PgBouncerDefaultPoolMode                = "session"
	PgBouncerDefaultIgnoreStartupParameters = "empty"

	// =========================== Pgpool Constants ============================
	EnvPostgresUsername                = "POSTGRES_USERNAME"
	EnvPgpoolPcpUser                   = "PGPOOL_PCP_USER"
	EnvPgpoolPcpPassword               = "PGPOOL_PCP_PASSWORD"
	EnvPgpoolPasswordEncryptionMethod  = "PGPOOL_PASSWORD_ENCRYPTION_METHOD"
	EnvEnablePoolPasswd                = "PGPOOL_ENABLE_POOL_PASSWD"
	EnvSkipPasswdEncryption            = "PGPOOL_SKIP_PASSWORD_ENCRYPTION"
	PgpoolConfigSecretMountPath        = "/config"
	PgpoolConfigVolumeName             = "pgpool-config"
	PgpoolContainerName                = "pgpool"
	PgpoolDefaultServicePort           = 9999
	PgpoolMonitoringDefaultServicePort = 9719
	PgpoolExporterDatabase             = "postgres"
	EnvPgpoolExporterDatabase          = "POSTGRES_DATABASE"
	EnvPgpoolService                   = "PGPOOL_SERVICE"
	EnvPgpoolServicePort               = "PGPOOL_SERVICE_PORT"
	EnvPgpoolSSLMode                   = "SSLMODE"
	PgpoolDefaultSSLMode               = "disable"
	PgpoolExporterContainerName        = "exporter"
	PgpoolAuthUsername                 = "pcp"
	SyncPeriod                         = 10

	KubeDBZooKeeperRoleName         = "kubedb:zookeeper-version-reader"
	KubeDBZooKeeperRoleBindingName  = "kubedb:zookeeper-version-reader"
	ZooKeeperClientPortName         = "client"
	ZooKeeperClientPort             = 2181
	ZooKeeperQuorumPortName         = "quorum"
	ZooKeeperQuorumPort             = 2888
	ZooKeeperLeaderElectionPortName = "leader-election"
	ZooKeeperLeaderElectionPort     = 3888
	ZooKeeperMetricsPortName        = "metrics"
	ZooKeeperMetricsPort            = 7000
	ZooKeeperAdminServerPortName    = "admin-server"
	ZooKeeperAdminServerPort        = 8080
	ZooKeeperNode                   = "/kubedb_health_checker_node"
	ZooKeeperData                   = "kubedb_health_checker_data"
	ZooKeeperConfigVolumeName       = "zookeeper-config"
	ZooKeeperConfigVolumePath       = "/conf"
	ZooKeeperDataVolumeName         = "data"
	ZooKeeperDataVolumePath         = "/data"
	ZooKeeperScriptVolumeName       = "script-vol"
	ZooKeeperScriptVolumePath       = "/scripts"
	ZooKeeperContainerName          = ResourceSingularZooKeeper
	ZooKeeperInitContainerName      = ResourceSingularZooKeeper + "-init"

	ZooKeeperConfigFileName               = "zoo.cfg"
	ZooKeeperLog4jPropertiesFileName      = "log4j.properties"
	ZooKeeperLog4jQuietPropertiesFileName = "log4j-quiet.properties"

	EnvZooKeeperDomain          = "DOMAIN"
	EnvZooKeeperQuorumPort      = "QUORUM_PORT"
	EnvZooKeeperLeaderPort      = "LEADER_PORT"
	EnvZooKeeperClientHost      = "CLIENT_HOST"
	EnvZooKeeperClientPort      = "CLIENT_PORT"
	EnvZooKeeperAdminServerHost = "ADMIN_SERVER_HOST"
	EnvZooKeeperAdminServerPort = "ADMIN_SERVER_PORT"
	EnvZooKeeperClusterName     = "CLUSTER_NAME"
	EnvZooKeeperClusterSize     = "CLUSTER_SIZE"
	EnvZooKeeperUser            = "ZK_USER"
	EnvZooKeeperPassword        = "ZK_PASSWORD"
	EnvZooKeeperJaasFilePath    = "ZK_JAAS_FILE_PATH"
	EnvZooKeeperJVMFLags        = "JVMFLAGS"

	ZooKeeperSuperUsername       = "super"
	ZooKeeperSASLAuthLoginConfig = "-Djava.security.auth.login.config"
	ZooKeeperJaasFilePath        = "/data/jaas.conf"
)
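
The Elasticsearch heap bounds above encode Elastic's sizing guidance: give the JVM at most half the available RAM and stay below the compressed-oops cutoff. A minimal sketch of clamping a heap size to these constants, assuming this package's import path is kubedb.dev/apimachinery/apis/kubedb/v1alpha2 (the helper is illustrative, not the operator's actual logic):

package main

import (
	"fmt"

	api "kubedb.dev/apimachinery/apis/kubedb/v1alpha2"
)

// jvmHeapFor returns ES_JAVA_OPTS heap flags for a container memory limit:
// half of the limit, clamped to [ElasticsearchMinHeapSize, ElasticsearchMaxHeapSize].
func jvmHeapFor(memLimitBytes int64) string {
	heap := memLimitBytes / 2 // no more than 50% of RAM
	if heap > api.ElasticsearchMaxHeapSize {
		heap = api.ElasticsearchMaxHeapSize // keep zero-based compressed oops
	}
	if heap < api.ElasticsearchMinHeapSize {
		heap = api.ElasticsearchMinHeapSize
	}
	return fmt.Sprintf("-Xms%d -Xmx%d", heap, heap)
}

func main() {
	// A 4Gi container limit yields a 2Gi heap.
	fmt.Println(api.ElasticsearchJavaOptsEnv, "=", jvmHeapFor(4*1024*1024*1024))
}
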
View Source
const (
	// used for Databases that have started provisioning
	DatabaseProvisioningStarted = "ProvisioningStarted"
	// used for Databases that have completed provisioning
	DatabaseProvisioned = "Provisioned"
	// used for Databases that are currently being initialized using Stash
	DatabaseDataRestoreStarted = "DataRestoreStarted"
	// used for Databases that have been initialized using Stash
	DatabaseDataRestored = "DataRestored"
	// used for Databases whose pods are ready
	DatabaseReplicaReady = "ReplicaReady"
	// used for Databases that are currently accepting connections
	DatabaseAcceptingConnection = "AcceptingConnection"
	// used for Databases that report status OK (which also implies that we can connect to them)
	DatabaseReady = "Ready"
	// used for Databases that report OK when all instances are available
	ServerReady = "ServerReady"
	// used for Databases that are paused
	DatabasePaused = "Paused"
	// used for Databases that are halted
	DatabaseHalted = "Halted"
	// used for pausing health check of a Database
	DatabaseHealthCheckPaused = "HealthCheckPaused"
	// used for Databases whose internal user credentials are synced
	InternalUsersSynced = "InternalUsersSynced"
	// used for Databases that have read access
	DatabaseReadAccess = "DatabaseReadAccess"
	// used for Databases that have write access
	DatabaseWriteAccess = "DatabaseWriteAccess"

	// Condition reasons
	DataRestoreStartedByExternalInitializer    = "DataRestoreStartedByExternalInitializer"
	DataRestoreInterrupted                     = "DataRestoreInterrupted"
	DatabaseSuccessfullyRestored               = "SuccessfullyDataRestored"
	FailedToRestoreData                        = "FailedToRestoreData"
	AllReplicasAreReady                        = "AllReplicasReady"
	SomeReplicasAreNotReady                    = "SomeReplicasNotReady"
	DatabaseAcceptingConnectionRequest         = "DatabaseAcceptingConnectionRequest"
	DatabaseNotAcceptingConnectionRequest      = "DatabaseNotAcceptingConnectionRequest"
	ReadinessCheckSucceeded                    = "ReadinessCheckSucceeded"
	ReadinessCheckFailed                       = "ReadinessCheckFailed"
	DatabaseProvisioningStartedSuccessfully    = "DatabaseProvisioningStartedSuccessfully"
	DatabaseSuccessfullyProvisioned            = "DatabaseSuccessfullyProvisioned"
	DatabaseHaltedSuccessfully                 = "DatabaseHaltedSuccessfully"
	DatabaseReadAccessCheckSucceeded           = "DatabaseReadAccessCheckSucceeded"
	DatabaseWriteAccessCheckSucceeded          = "DatabaseWriteAccessCheckSucceeded"
	DatabaseReadAccessCheckFailed              = "DatabaseReadAccessCheckFailed"
	DatabaseWriteAccessCheckFailed             = "DatabaseWriteAccessCheckFailed"
	InternalUsersCredentialSyncFailed          = "InternalUsersCredentialsSyncFailed"
	InternalUsersCredentialsSyncedSuccessfully = "InternalUsersCredentialsSyncedSuccessfully"
)

List of possible condition types for a KubeDB object
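
These strings are the condition types (followed above by their reasons) recorded in a KubeDB object's status.conditions. A hedged sketch of gating on them; the Condition struct here is a simplified stand-in for the kmodules condition type that KubeDB statuses actually use:

package main

import (
	"fmt"

	api "kubedb.dev/apimachinery/apis/kubedb/v1alpha2"
)

// Condition is a simplified stand-in for a status.conditions entry.
type Condition struct {
	Type   string
	Status string // "True", "False", or "Unknown"
}

// isTrue reports whether the condition of the given type is present and True.
func isTrue(conds []Condition, condType string) bool {
	for _, c := range conds {
		if c.Type == condType {
			return c.Status == "True"
		}
	}
	return false
}

func main() {
	conds := []Condition{
		{Type: api.DatabaseProvisioned, Status: "True"},
		{Type: api.DatabaseAcceptingConnection, Status: "True"},
		{Type: api.DatabaseReady, Status: "True"},
	}
	ready := isTrue(conds, api.DatabaseReady) && isTrue(conds, api.DatabaseAcceptingConnection)
	fmt.Println("safe to route traffic:", ready)
}
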

View Source
const (
	KafkaPortNameREST                  = "http"
	KafkaPortNameController            = "controller"
	KafkaPortNameCruiseControlListener = "cc-listener"
	KafkaPortNameCruiseControlREST     = "cc-rest"
	KafkaBrokerClientPortName          = "broker"
	KafkaControllerClientPortName      = "controller"
	KafkaPortNameLocal                 = "local"
	KafkaTopicNameHealth               = "kafka-health"
	KafkaTopicDeletionThresholdOffset  = 1000
	KafkaBrokerMaxID                   = 1000
	KafkaRESTPort                      = 9092
	KafkaControllerRESTPort            = 9093
	KafkaLocalRESTPort                 = 29092
	KafkaCruiseControlRESTPort         = 9090
	KafkaCruiseControlListenerPort     = 9094
	KafkaCCDefaultInNetwork            = 500000
	KafkaCCDefaultOutNetwork           = 500000

	KafkaContainerName          = "kafka"
	KafkaUserAdmin              = "admin"
	KafkaNodeRoleSet            = "set"
	KafkaNodeRolesCombined      = "controller,broker"
	KafkaNodeRolesController    = "controller"
	KafkaNodeRolesBrokers       = "broker"
	KafkaNodeRolesCruiseControl = "cruise-control"
	KafkaStandbyServiceSuffix   = "standby"

	KafkaBrokerListener     = "KafkaBrokerListener"
	KafkaControllerListener = "KafkaControllerListener"

	KafkaDataDir                              = "/var/log/kafka"
	KafkaMetaDataDir                          = "/var/log/kafka/metadata"
	KafkaCertDir                              = "/var/private/ssl"
	KafkaConfigDir                            = "/opt/kafka/config/kafkaconfig"
	KafkaTempConfigDir                        = "/opt/kafka/config/temp-config"
	KafkaCustomConfigDir                      = "/opt/kafka/config/custom-config"
	KafkaCCTempConfigDir                      = "/opt/cruise-control/temp-config"
	KafkaCCCustomConfigDir                    = "/opt/cruise-control/custom-config"
	KafkaCapacityConfigPath                   = "config/capacity.json"
	KafkaConfigFileName                       = "config.properties"
	KafkaServerCustomConfigFileName           = "server.properties"
	KafkaBrokerCustomConfigFileName           = "broker.properties"
	KafkaControllerCustomConfigFileName       = "controller.properties"
	KafkaSSLPropertiesFileName                = "ssl.properties"
	KafkaClientAuthConfigFileName             = "clientauth.properties"
	KafkaCruiseControlConfigFileName          = "cruisecontrol.properties"
	KafkaCruiseControlCapacityConfigFileName  = "capacity.json"
	KafkaCruiseControlBrokerSetConfigFileName = "brokerSets.json"
	KafkaCruiseControlClusterConfigFileName   = "clusterConfigs.json"
	KafkaCruiseControlLog4jConfigFileName     = "log4j.properties"
	KafkaCruiseControlUIConfigFileName        = "config.csv"

	KafkaListeners                         = "listeners"
	KafkaAdvertisedListeners               = "advertised.listeners"
	KafkaBootstrapServers                  = "bootstrap.servers"
	KafkaListenerSecurityProtocolMap       = "listener.security.protocol.map"
	KafkaControllerNodeCount               = "controller.count"
	KafkaControllerQuorumVoters            = "controller.quorum.voters"
	KafkaControllerListenersName           = "controller.listener.names"
	KafkaInterBrokerListener               = "inter.broker.listener.name"
	KafkaNodeRole                          = "process.roles"
	KafkaClusterID                         = "cluster.id"
	KafkaClientID                          = "client.id"
	KafkaDataDirName                       = "log.dirs"
	KafkaMetadataDirName                   = "metadata.log.dir"
	KafkaKeystorePasswordKey               = "keystore_password"
	KafkaTruststorePasswordKey             = "truststore_password"
	KafkaServerKeystoreKey                 = "server.keystore.jks"
	KafkaServerTruststoreKey               = "server.truststore.jks"
	KafkaSecurityProtocol                  = "security.protocol"
	KafkaGracefulShutdownTimeout           = "task.shutdown.graceful.timeout.ms"
	KafkaTopicConfigProviderClass          = "topic.config.provider.class"
	KafkaCapacityConfigFile                = "capacity.config.file"
	KafkaTwoStepVerification               = "two.step.verification.enabled"
	KafkaBrokerFailureDetection            = "kafka.broker.failure.detection.enable"
	KafkaMetricSamplingInterval            = "metric.sampling.interval.ms"
	KafkaPartitionMetricsWindow            = "partition.metrics.window.ms"
	KafkaPartitionMetricsWindowNum         = "num.partition.metrics.windows"
	KafkaSampleStoreTopicReplicationFactor = "sample.store.topic.replication.factor"

	KafkaEndpointVerifyAlgo  = "ssl.endpoint.identification.algorithm"
	KafkaKeystoreLocation    = "ssl.keystore.location"
	KafkaTruststoreLocation  = "ssl.truststore.location"
	KafkaKeystorePassword    = "ssl.keystore.password"
	KafkaTruststorePassword  = "ssl.truststore.password"
	KafkaKeyPassword         = "ssl.key.password"
	KafkaKeystoreDefaultPass = "changeit"

	KafkaMetricReporters       = "metric.reporters"
	KafkaAutoCreateTopicEnable = "auto.create.topics.enable"

	KafkaEnabledSASLMechanisms       = "sasl.enabled.mechanisms"
	KafkaSASLMechanism               = "sasl.mechanism"
	KafkaMechanismControllerProtocol = "sasl.mechanism.controller.protocol"
	KafkaSASLInterBrokerProtocol     = "sasl.mechanism.inter.broker.protocol"
	KafkaSASLPLAINConfigKey          = "listener.name.SASL_PLAINTEXT.plain.sasl.jaas.config"
	KafkaSASLSSLConfigKey            = "listener.name.SASL_SSL.plain.sasl.jaas.config"
	KafkaSASLJAASConfig              = "sasl.jaas.config"
	KafkaServiceName                 = "serviceName"
	KafkaSASLPlainMechanism          = "PLAIN"

	KafkaCCMetricSamplerClass            = "metric.sampler.class"
	KafkaCCCapacityConfig                = "capacity.config.file"
	KafkaCCTwoStepVerificationEnabled    = "two.step.verification.enabled"
	KafkaCCBrokerFailureDetectionEnabled = "kafka.broker.failure.detection.enable"
	KafkaOffSetTopicReplica              = "offsets.topic.replication.factor"
	KafkaTransactionStateLogReplica      = "transaction.state.log.replication.factor"
	KafkaTransactionSateLogMinISR        = "transaction.state.log.min.isr"
	KafkaLogCleanerMinLagSec             = "log.cleaner.min.compaction.lag.ms"
	KafkaLogCleanerBackoffMS             = "log.cleaner.backoff.ms"

	KafkaCCKubernetesMode                 = "cruise.control.metrics.reporter.kubernetes.mode"
	KafkaCCBootstrapServers               = "cruise.control.metrics.reporter.bootstrap.servers"
	KafkaCCMetricTopicAutoCreate          = "cruise.control.metrics.topic.auto.create"
	KafkaCCMetricTopicNumPartition        = "cruise.control.metrics.topic.num.partitions"
	KafkaCCMetricTopicReplica             = "cruise.control.metrics.topic.replication.factor"
	KafkaCCMetricReporterSecurityProtocol = "cruise.control.metrics.reporter.security.protocol"
	KafkaCCMetricReporterSaslMechanism    = "cruise.control.metrics.reporter.sasl.mechanism"
	KafkaCCSampleLoadingThreadsNum        = "num.sample.loading.threads"
	KafkaCCMinSamplesPerBrokerWindow      = "min.samples.per.broker.metrics.window"

	KafkaVolumeData         = "data"
	KafkaVolumeConfig       = "kafkaconfig"
	KafkaVolumeTempConfig   = "temp-config"
	KafkaVolumeCustomConfig = "custom-config"

	EnvKafkaUser     = "KAFKA_USER"
	EnvKafkaPassword = "KAFKA_PASSWORD"

	KafkaListenerPLAINTEXTProtocol = "PLAINTEXT"
	KafkaListenerSASLProtocol      = "SASL_PLAINTEXT"
	KafkaListenerSASLSSLProtocol   = "SASL_SSL"

	KafkaCCMetricsSampler         = "com.linkedin.kafka.cruisecontrol.monitor.sampling.CruiseControlMetricsReporterSampler"
	KafkaAdminTopicConfigProvider = "com.linkedin.kafka.cruisecontrol.config.KafkaAdminTopicConfigProvider"
	KafkaCCMetricReporter         = "com.linkedin.kafka.cruisecontrol.metricsreporter.CruiseControlMetricsReporter"
	KafkaJMXMetricReporter        = "org.apache.kafka.common.metrics.JmxReporter"

	// =========================== Solr Constants ============================
	ResourceCodeSolr      = "sl"
	ResourceKindSolr      = "Solr"
	ResourceSingularSolr  = "solr"
	ResourcePluralSolr    = "solrs"
	SolrPortName          = "http"
	SolrRestPort          = 8983
	SolrSecretKey         = "solr.xml"
	SolrContainerName     = "solr"
	SolrInitContainerName = "init-solr"
	SolrAdmin             = "admin"
	SecurityJSON          = "security.json"
	SolrZkDigest          = "zk-digest"
	SolrZkReadonlyDigest  = "zk-digest-readonly"

	SolrVolumeDefaultConfig = "default-config"
	SolrVolumeCustomConfig  = "custom-config"
	SolrVolumeAuthConfig    = "auth-config"
	SolrVolumeData          = "data"
	SolrVolumeConfig        = "slconfig"

	DistLibs              = "/opt/solr/dist"
	ContribLibs           = "/opt/solr/contrib/%s/lib"
	SysPropLibPlaceholder = "${solr.sharedLib:}"
	SolrHomeDir           = "/var/solr"
	SolrDataDir           = "/var/solr/data"
	SolrTempConfigDir     = "/temp-config"
	SolrCustomConfigDir   = "/custom-config"
	SolrSecurityConfigDir = "/var/security"

	SolrCloudHostKey                       = "host"
	SolrCloudHostValue                     = ""
	SolrCloudHostPortKey                   = "hostPort"
	SolrCloudHostPortValue                 = 80
	SolrCloudHostContextKey                = "hostContext"
	SolrCloudHostContextValue              = "solr"
	SolrCloudGenericCoreNodeNamesKey       = "genericCoreNodeNames"
	SolrCloudGenericCoreNodeNamesValue     = true
	SolrCloudZKClientTimeoutKey            = "zkClientTimeout"
	SolrCloudZKClientTimeoutValue          = 30000
	SolrCloudDistribUpdateSoTimeoutKey     = "distribUpdateSoTimeout"
	SolrCloudDistribUpdateSoTimeoutValue   = 600000
	SolrCloudDistribUpdateConnTimeoutKey   = "distribUpdateConnTimeout"
	SolrCloudDistribUpdateConnTimeoutValue = 60000
	SolrCloudZKCredentialProviderKey       = "zkCredentialsProvider"
	SolrCloudZKCredentialProviderValue     = "org.apache.solr.common.cloud.DigestZkCredentialsProvider"
	SolrCloudZKAclProviderKey              = "zkACLProvider"
	SolrCloudZKAclProviderValue            = "org.apache.solr.common.cloud.DigestZkACLProvider"
	SolrCloudZKCredentialsInjectorKey      = "zkCredentialsInjector"
	SolrCloudZKCredentialsInjectorValue    = "org.apache.solr.common.cloud.VMParamsZkCredentialsInjector"

	ShardHandlerFactorySocketTimeoutKey   = "socketTimeout"
	ShardHandlerFactorySocketTimeoutValue = 600000
	ShardHandlerFactoryConnTimeoutKey     = "connTimeout"
	ShardHandlerFactoryConnTimeoutValue   = 60000

	SolrKeysMaxBooleanClausesKey   = "maxBooleanClauses"
	SolrKeysMaxBooleanClausesValue = "solr.max.booleanClauses"
	SolrKeysSharedLibKey           = "sharedLib"
	SolrKeysShardLibValue          = "solr.sharedLib"
	SolrKeysHostPortKey            = "hostPort"
	SolrKeysHostPortValue          = "solr.port.advertise"
	SolrKeysAllowPathsKey          = "allowPaths"
	SolrKeysAllowPathsValue        = "solr.allowPaths"

	SolrConfMaxBooleanClausesKey   = "maxBooleanClauses"
	SolrConfMaxBooleanClausesValue = 1024
	SolrConfAllowPathsKey          = "allowPaths"
	SolrConfAllowPathsValue        = ""
	SolrConfSolrCloudKey           = "solrcloud"
	SolrConfShardHandlerFactoryKey = "shardHandlerFactory"
)
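
Most of the Kafka names above are literal property keys for config.properties and cruisecontrol.properties. A short sketch that renders a combined-role listener section from them; the listener layout and addresses are illustrative placeholders, not values the operator necessarily sets:

package main

import (
	"fmt"
	"sort"

	api "kubedb.dev/apimachinery/apis/kubedb/v1alpha2"
)

func main() {
	cfg := map[string]string{
		api.KafkaNodeRole:                    api.KafkaNodeRolesCombined, // process.roles=controller,broker
		api.KafkaListeners:                   "CONTROLLER://:9093,BROKER://:9092",
		api.KafkaControllerListenersName:     "CONTROLLER",
		api.KafkaInterBrokerListener:         "BROKER",
		api.KafkaListenerSecurityProtocolMap: "CONTROLLER:" + api.KafkaListenerPLAINTEXTProtocol + ",BROKER:" + api.KafkaListenerPLAINTEXTProtocol,
	}
	// Emit deterministic key=value lines, the format config.properties expects.
	keys := make([]string, 0, len(cfg))
	for k := range cfg {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, k := range keys {
		fmt.Printf("%s=%s\n", k, cfg[k])
	}
}
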
View Source
const (
	DruidConfigDirCommon              = "/opt/druid/conf/druid/cluster/_common"
	DruidConfigDirCoordinatorOverlord = "/opt/druid/conf/druid/cluster/master/coordinator-overlord"
	DruidConfigDirHistoricals         = "/opt/druid/conf/druid/cluster/data/historical"
	DruidConfigDirMiddleManagers      = "/opt/druid/conf/druid/cluster/data/middleManager"
	DruidConfigDirBrokers             = "/opt/druid/conf/druid/cluster/query/broker"
	DruidConfigDirRouters             = "/opt/druid/conf/druid/cluster/query/router"
	DruidCConfigDirMySQLMetadata      = "/opt/druid/extensions/mysql-metadata-storage"

	DruidVolumeOperatorConfig = "operator-config-volume"
	DruidVolumeMainConfig     = "main-config-volume"
	DruidVolumeCustomConfig   = "custom-config"

	DruidOperatorConfigDir = "/tmp/config/operator-config"
	DruidMainConfigDir     = "/opt/druid/conf"
	DruidCustomConfigDir   = "/tmp/config/custom-config"

	DruidVolumeCommonConfig          = "common-config-volume"
	DruidCommonConfigFile            = "common.runtime.properties"
	DruidCoordinatorsJVMConfigFile   = "coordinators.jvm.config"
	DruidHistoricalsJVMConfigFile    = "historicals.jvm.config"
	DruidBrokersJVMConfigFile        = "brokers.jvm.config"
	DruidMiddleManagersJVMConfigFile = "middleManagers.jvm.config"
	DruidRoutersJVMConfigFile        = "routers.jvm.config"
	DruidCoordinatorsConfigFile      = "coordinators.properties"
	DruidHistoricalsConfigFile       = "historicals.properties"
	DruidMiddleManagersConfigFile    = "middleManagers.properties"
	DruidBrokersConfigFile           = "brokers.properties"
	DruidRoutersConfigFile           = "routers.properties"
	DruidVolumeMySQLMetadataStorage  = "mysql-metadata-storage"

	DruidContainerName     = "druid"
	DruidInitContainerName = "init-druid"
	DruidUserAdmin         = "admin"

	EnvDruidAdminPassword          = "DRUID_ADMIN_PASSWORD"
	EnvDruidMetdataStoragePassword = "DRUID_METADATA_STORAGE_PASSWORD"
	EnvDruidZKServicePassword      = "DRUID_ZK_SERVICE_PASSWORD"
	EnvDruidCoordinatorAsOverlord  = "DRUID_COORDINATOR_AS_OVERLORD"

	DruidPortCoordinators   = 8081
	DruidPortOverlords      = 8090
	DruidPortHistoricals    = 8083
	DruidPortMiddleManagers = 8091
	DruidPortBrokers        = 8082
	DruidPortRouters        = 8888

	// Common Runtime Configurations Properties
	// ZooKeeperSpec
	DruidZKServiceHost              = "druid.zk.service.host"
	DruidZKPathsBase                = "druid.zk.paths.base"
	DruidZKServiceCompress          = "druid.zk.service.compress"
	DruidZKServiceUserKey           = "druid.zk.service.user"
	DruidZKServicePasswordKey       = "druid.zk.service.pwd"
	DruidZKServicePasswordEnvConfig = "{\"type\": \"environment\", \"variable\": \"DRUID_ZK_SERVICE_PASSWORD\"}"

	// Metadata Storage
	DruidMetadataStorageTypeKey                    = "druid.metadata.storage.type"
	DruidMetadataStorageConnectorConnectURI        = "druid.metadata.storage.connector.connectURI"
	DruidMetadataStorageConnectURIPrefixMySQL      = "jdbc:mysql://"
	DruidMetadataStorageConnectURIPrefixPostgreSQL = "jdbc:postgresql://"
	DruidMetadataStorageConnectorUser              = "druid.metadata.storage.connector.user"
	DruidMetadataStorageConnectorPassword          = "druid.metadata.storage.connector.password"
	DruidMetadataStorageConnectorPasswordEnvConfig = "{\"type\": \"environment\", \"variable\": \"DRUID_METADATA_STORAGE_PASSWORD\"}"
	DruidMetadataStorageCreateTables               = "druid.metadata.storage.connector.createTables"

	// Deep Storage
	DruidDeepStorageTypeKey      = "druid.storage.type"
	DruidDeepStorageTypeS3       = "s3"
	DruidDeepStorageBaseKey      = "druid.storage.baseKey"
	DruidDeepStorageBucket       = "druid.storage.bucket"
	DruidS3AccessKey             = "druid.s3.accessKey"
	DruidS3SecretKey             = "druid.s3.secretKey"
	DruidS3EndpointSigningRegion = "druid.s3.endpoint.signingRegion"
	DruidS3EnablePathStyleAccess = "druid.s3.enablePathStyleAccess"
	DruidS3EndpointURL           = "druid.s3.endpoint.url"

	// Indexing service logs
	DruidIndexerLogsType           = "druid.indexer.logs.type"
	DruidIndexerLogsS3Bucket       = "druid.indexer.logs.s3Bucket"
	DruidIndexerLogsS3Prefix       = "druid.indexer.logs.s3Prefix"
	DruidEnableLookupSyncOnStartup = "druid.lookup.enableLookupSyncOnStartup"

	// Authentication
	DruidAuthAuthenticationChain                             = "druid.auth.authenticatorChain"
	DruidAuthAuthenticationChainValueBasic                   = "[\"basic\"]"
	DruidAuthAuthenticatorBasicType                          = "druid.auth.authenticator.basic.type"
	DruidAuthAuthenticatorBasicTypeValue                     = "basic"
	DruidAuthAuthenticatorBasicInitialAdminPassword          = "druid.auth.authenticator.basic.initialAdminPassword"
	DruidAuthAuthenticatorBasicInitialAdminPasswordEnvConfig = "{\"type\": \"environment\", \"variable\": \"DRUID_ADMIN_PASSWORD\"}"
	DruidAuthAuthenticatorBasicInitialInternalClientPassword = "druid.auth.authenticator.basic.initialInternalClientPassword"
	DruidAuthAuthenticatorBasicCredentialsValidatorType      = "druid.auth.authenticator.basic.credentialsValidator.type"
	DruidAuthAuthenticatorBasicSkipOnFailure                 = "druid.auth.authenticator.basic.skipOnFailure"
	DruidAuthAuthenticatorBasicAuthorizerName                = "druid.auth.authenticator.basic.authorizerName"

	// Escalator
	DruidAuthEscalatorType                   = "druid.escalator.type"
	DruidAuthEscalatorInternalClientUsername = "druid.escalator.internalClientUsername"
	DruidAuthEscalatorInternalClientPassword = "druid.escalator.internalClientPassword"
	DruidAuthEscalatorAuthorizerName         = "druid.escalator.authorizerName"
	DruidAuthAuthorizers                     = "druid.auth.authorizers"
	DruidAuthAuthorizerBasicType             = "druid.auth.authorizer.basic.type"

	// Extension Load List
	DruidExtensionLoadListKey               = "druid.extensions.loadList"
	DruidExtensionLoadList                  = "" /* 217-byte string literal not displayed */
	DruidExtensionAvro                      = "druid-avro-extensions"
	DruidExtensionS3                        = "druid-s3-extensions"
	DruidExtensionHDFS                      = "druid-hdfs-storage"
	DruidExtensionGoogle                    = "druid-google-extensions"
	DruidExtensionAzure                     = "druid-azure-extensions"
	DruidExtensionKafkaIndexingService      = "druid-kafka-indexing-service"
	DruidExtensionDataSketches              = "druid-datasketches"
	DruidExtensionKubernetes                = "druid-kubernetes-extensions"
	DruidExtensionMySQLMetadataStorage      = "mysql-metadata-storage"
	DruidExtensionPostgreSQLMetadataStorage = "postgresql-metadata-storage"
	DruidExtensionBasicSecurity             = "druid-basic-security"
	DruidExtensionMultiStageQuery           = "druid-multi-stage-query"
	DruidExtensionPrometheusEmitter         = "prometheus-emitter"
	DruidService                            = "druid.service"

	// Monitoring Configurations
	DruidEmitter                                = "druid.emitter"
	DruidEmitterPrometheus                      = "prometheus"
	DruidEmitterPrometheusPortKey               = "druid.emitter.prometheus.port"
	DruidEmitterPrometheusPortVal               = 8080
	DruidMonitoringMonitorsKey                  = "druid.monitoring.monitors"
	DruidEmitterPrometheusStrategy              = "druid.emitter.prometheus.strategy"
	DruidMetricsJVMMonitor                      = "org.apache.druid.java.util.metrics.JvmMonitor"
	DruidMetricsServiceStatusMonitor            = "org.apache.druid.server.metrics.ServiceStatusMonitor"
	DruidMetricsQueryCountStatsMonitor          = "org.apache.druid.server.metrics.QueryCountStatsMonitor"
	DruidMonitoringHistoricalMetricsMonitor     = "org.apache.druid.server.metrics.HistoricalMetricsMonitor"
	DruidMonitoringSegmentsStatsMonitor         = "org.apache.druid.server.metrics.SegmentStatsMonitor"
	DruidMonitoringWorkerTaskCountsStatsMonitor = "org.apache.druid.server.metrics.WorkerTaskCountStatsMonitor"
	DruidMonitoringQueryCountStatsMonitor       = "org.apache.druid.server.metrics.QueryCountStatsMonitor"
	DruidMonitoringTaskCountStatsMonitor        = "org.apache.druid.server.metrics.TaskCountStatsMonitor"
	DruidMonitoringSysMonitor                   = "org.apache.druid.java.util.metrics.SysMonitor"

	/// Coordinators Configurations
	DruidCoordinatorStartDelay                = "druid.coordinator.startDelay"
	DruidCoordinatorPeriod                    = "druid.coordinator.period"
	DruidIndexerQueueStartDelay               = "druid.indexer.queue.startDelay"
	DruidManagerSegmentsPollDuration          = "druid.manager.segments.pollDuration"
	DruidCoordinatorKillAuditLogOn            = "druid.coordinator.kill.audit.on"
	DruidMillisToWaitBeforeDeleting           = "millisToWaitBeforeDeleting"
	DruidCoordinatorAsOverlord                = "druid.coordinator.asOverlord.enabled"
	DruidCoordinatorAsOverlordOverlordService = "druid.coordinator.asOverlord.overlordService"

	/// Overlords Configurations
	DruidServiceNameOverlords            = "druid/overlord"
	DruidIndexerStorageType              = "druid.indexer.storage.type"
	DruidIndexerAuditLogEnabled          = "druid.indexer.auditLog.enabled"
	DruidIndexerLogsKillEnables          = "druid.indexer.logs.kill.enabled"
	DruidIndexerLogsKillDurationToRetain = "druid.indexer.logs.kill.durationToRetain"
	DruidIndexerLogsKillInitialDelay     = "druid.indexer.logs.kill.initialDelay"
	DruidIndexerLogsKillDelay            = "druid.indexer.logs.kill.delay"

	DruidEmitterLoggingLogLevel = "druid.emitter.logging.logLevel"

	/// Historicals Configurations
	// Properties
	DruidProcessingNumOfThreads = "druid.processing.numThreads"

	// Segment Cache
	DruidHistoricalsSegmentCacheLocations              = "druid.segmentCache.locations"
	DruidHistoricalsSegmentCacheDropSegmentDelayMillis = "druid.segmentCache.dropSegmentDelayMillis"
	DruidHistoricalsSegmentCacheDir                    = "/druid/data/segments"
	DruidVolumeHistoricalsSegmentCache                 = "segment-cache"

	// Query Cache
	DruidHistoricalCacheUseCache      = "druid.historical.cache.useCache"
	DruidHistoricalCachePopulateCache = "druid.historical.cache.populateCache"
	DruidCacheSizeInBytes             = "druid.cache.sizeInBytes"

	// Values
	DruidSegmentCacheLocationsDefaultValue = "[{\"path\":\"/druid/data/segments\",\"maxSize\":10737418240}]"

	/// MiddleManagers Configurations
	// Properties
	DruidWorkerCapacity                                    = "druid.worker.capacity"
	DruidIndexerTaskBaseTaskDir                            = "druid.indexer.task.baseTaskDir"
	DruidWorkerTaskBaseTaskDirKey                          = "druid.worker.task.baseTaskDir"
	DruidWorkerTaskBaseTaskDir                             = "/var/druid/task"
	DruidWorkerBaseTaskDirSize                             = "druid.worker.baseTaskDirSize"
	DruidIndexerForkPropertyDruidProcessingBufferSizeBytes = "druid.indexer.fork.property.druid.processing.buffer.sizeBytes"
	DruidMiddleManagersVolumeBaseTaskDir                   = "base-task-dir"
	DruidVolumeMiddleManagersBaseTaskDir                   = "base-task-dir"

	// Values
	DruidIndexerTaskBaseTaskDirValue = "/druid/data/baseTaskDir"

	/// Brokers Configurations
	DruidBrokerHTTPNumOfConnections = "druid.broker.http.numConnections"
	DruidSQLEnable                  = "druid.sql.enable"

	/// Routers Configurations
	DruidRouterHTTPNumOfConnections = "druid.router.http.numConnections"
	DruidRouterHTTPNumOfMaxThreads  = "druid.router.http.numMaxThreads"

	// Common Nodes Configurations
	// Properties
	DruidPlaintextPort               = "druid.plaintextPort"
	DruidProcessingBufferSizeBytes   = "druid.processing.buffer.sizeBytes"
	DruidProcessingNumOfMergeBuffers = "druid.processing.numMergeBuffers"
	DruidServerHTTPNumOfThreads      = "druid.server.http.numThreads"

	// Health Check
	DruidHealthDataZero = "0"
	DruidHealthDataOne  = "1"
)

=========================== Druid Constants ============================
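
The Druid names are likewise property keys for common.runtime.properties and the per-node runtime files. A hedged sketch assembling the PostgreSQL metadata-storage section with a made-up host; note that the connector password is referenced through Druid's environment-variable JSON rather than inlined:

package main

import (
	"fmt"

	api "kubedb.dev/apimachinery/apis/kubedb/v1alpha2"
)

func main() {
	// Hypothetical PostgreSQL metadata store at pg.demo.svc:5432.
	props := [][2]string{
		{api.DruidMetadataStorageTypeKey, "postgresql"},
		{api.DruidMetadataStorageConnectorConnectURI, api.DruidMetadataStorageConnectURIPrefixPostgreSQL + "pg.demo.svc:5432/druid"},
		{api.DruidMetadataStorageConnectorUser, "druid"},
		// Resolved from DRUID_METADATA_STORAGE_PASSWORD at runtime.
		{api.DruidMetadataStorageConnectorPassword, api.DruidMetadataStorageConnectorPasswordEnvConfig},
		{api.DruidMetadataStorageCreateTables, "true"},
	}
	for _, p := range props {
		fmt.Printf("%s=%s\n", p[0], p[1])
	}
}
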

View Source
const (
	RabbitMQAMQPPort          = 5672
	RabbitMQPeerDiscoveryPort = 4369
	RabbitMQManagementUIPort  = 15672
	RabbitMQExporterPort      = 15692
	RabbitMQInterNodePort     = 25672

	RabbitMQVolumeData         = "data"
	RabbitMQVolumeConfig       = "rabbitmqconfig"
	RabbitMQVolumeTempConfig   = "temp-config"
	RabbitMQVolumeCustomConfig = "custom-config"

	RabbitMQDataDir         = "/var/lib/rabbitmq/mnesia"
	RabbitMQConfigDir       = "/config/"
	RabbitMQPluginsDir      = "/etc/rabbitmq/"
	RabbitMQCertDir         = "/var/private/ssl"
	RabbitMQTempConfigDir   = "/tmp/config/"
	RabbitMQCustomConfigDir = "/tmp/config/custom_config/"

	RabbitMQConfigVolName     = "rabbitmq-config"
	RabbitMQPluginsVolName    = "rabbitmq-plugins"
	RabbitMQTempConfigVolName = "temp-config"

	RabbitMQContainerName          = "rabbitmq"
	RabbitMQInitContainerName      = "rabbitmq-init"
	RabbitMQManagementPlugin       = "rabbitmq_management"
	RabbitMQPeerdiscoveryPlugin    = "rabbitmq_peer_discovery_k8s"
	RabbitMQLoopBackUserKey        = "loopback_users"
	RabbitMQLoopBackUserVal        = "none"
	RabbitMQDefaultTCPListenerKey  = "listeners.tcp.default"
	RabbitMQDefaultTCPListenerVal  = "5672"
	RabbitMQQueueMasterLocatorKey  = "queue_master_locator"
	RabbitMQQueueMasterLocatorVal  = "min-masters"
	RabbitMQDiskFreeLimitKey       = "disk_free_limit.absolute"
	RabbitMQDiskFreeLimitVal       = "2GB"
	RabbitMQPartitionHandingKey    = "cluster_partition_handling"
	RabbitMQPartitionHandingVal    = "pause_minority"
	RabbitMQPeerDiscoveryKey       = "cluster_formation.peer_discovery_backend"
	RabbitMQPeerDiscoveryVal       = "rabbit_peer_discovery_k8s"
	RabbitMQK8sHostKey             = "cluster_formation.k8s.host"
	RabbitMQK8sHostVal             = "kubernetes.default.svc.cluster.local"
	RabbitMQK8sAddressTypeKey      = "cluster_formation.k8s.address_type"
	RabbitMQK8sAddressTypeVal      = "hostname"
	RabbitMQNodeCleanupWarningKey  = "cluster_formation.node_cleanup.only_log_warning"
	RabbitMQNodeCleanupWarningVal  = "true"
	RabbitMQLogFileLevelKey        = "log.file.level"
	RabbitMQLogFileLevelVal        = "info"
	RabbitMQLogConsoleKey          = "log.console"
	RabbitMQLogConsoleVal          = "true"
	RabbitMQLogConsoleLevelKey     = "log.console.level"
	RabbitMQLogConsoleLevelVal     = "info"
	RabbitMQDefaultUserKey         = "default_user"
	RabbitMQDefaultUserVal         = "$(RABBITMQ_DEFAULT_USER)"
	RabbitMQDefaultPasswordKey     = "default_pass"
	RabbitMQDefaultPasswordVal     = "$(RABBITMQ_DEFAULT_PASS)"
	RabbitMQClusterNameKey         = "cluster_name"
	RabbitMQK8sSvcNameKey          = "cluster_formation.k8s.service_name"
	RabbitMQConfigFileName         = "rabbitmq.conf"
	RabbitMQEnabledPluginsFileName = "enabled_plugins"
	RabbitMQHealthCheckerQueueName = "kubedb-system"
)
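
The paired *Key/*Val constants above line up as key=value entries for rabbitmq.conf, configuring Kubernetes peer discovery among other things. A minimal sketch reassembling an illustrative subset:

package main

import (
	"fmt"

	api "kubedb.dev/apimachinery/apis/kubedb/v1alpha2"
)

func main() {
	lines := [][2]string{
		{api.RabbitMQPeerDiscoveryKey, api.RabbitMQPeerDiscoveryVal},
		{api.RabbitMQK8sHostKey, api.RabbitMQK8sHostVal},
		{api.RabbitMQK8sAddressTypeKey, api.RabbitMQK8sAddressTypeVal},
		{api.RabbitMQPartitionHandingKey, api.RabbitMQPartitionHandingVal},
		{api.RabbitMQDefaultTCPListenerKey, api.RabbitMQDefaultTCPListenerVal},
	}
	for _, kv := range lines {
		fmt.Printf("%s = %s\n", kv[0], kv[1]) // rabbitmq.conf uses "key = value" lines
	}
}
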
View Source
const (

	// envs
	EnvFerretDBUser     = "FERRETDB_PG_USER"
	EnvFerretDBPassword = "FERRETDB_PG_PASSWORD"
	EnvFerretDBHandler  = "FERRETDB_HANDLER"
	EnvFerretDBPgURL    = "FERRETDB_POSTGRESQL_URL"
	EnvFerretDBTLSPort  = "FERRETDB_LISTEN_TLS"
	EnvFerretDBCAPath   = "FERRETDB_LISTEN_TLS_CA_FILE"
	EnvFerretDBCertPath = "FERRETDB_LISTEN_TLS_CERT_FILE"
	EnvFerretDBKeyPath  = "FERRETDB_LISTEN_TLS_KEY_FILE"

	FerretDBContainerName = "ferretdb"
	FerretDBMainImage     = "ghcr.io/ferretdb/ferretdb"
	FerretDBUser          = "postgres"

	FerretDBServerPath = "/etc/certs/server"

	FerretDBDefaultPort = 27017
	FerretDBMetricsPort = 8080
	FerretDBTLSPort     = 27018

	FerretDBMetricsPath = "/debug/metrics"
)

=========================== FerretDB Constants ============================
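
The Env* constants mirror FerretDB's documented environment variables. A hedged sketch wiring a FerretDB container's environment (core is k8s.io/api/core/v1; the backend URL and image tag are placeholders):

package main

import (
	"fmt"

	core "k8s.io/api/core/v1"
	api "kubedb.dev/apimachinery/apis/kubedb/v1alpha2"
)

func main() {
	container := core.Container{
		Name:  api.FerretDBContainerName,
		Image: api.FerretDBMainImage + ":1.18.0", // hypothetical tag
		Env: []core.EnvVar{
			// Placeholder backing-PostgreSQL URL; real deployments point at the backend Postgres.
			{Name: api.EnvFerretDBPgURL, Value: "postgres://postgres@pg.demo.svc:5432/ferretdb"},
			{Name: api.EnvFerretDBTLSPort, Value: fmt.Sprintf(":%d", api.FerretDBTLSPort)},
			{Name: api.EnvFerretDBCertPath, Value: api.FerretDBServerPath + "/tls.crt"},
			{Name: api.EnvFerretDBKeyPath, Value: api.FerretDBServerPath + "/tls.key"},
		},
	}
	fmt.Println(container.Name, "serves TLS on port", api.FerretDBTLSPort)
}
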

View Source
const (
	ResourceKindStatefulSet = "StatefulSet"
	ResourceKindPetSet      = "PetSet"
)

Resource kind related constants

View Source
const (
	InitFromGit          = "init-from-git"
	InitFromGitMountPath = "/git"
	GitSecretVolume      = "git-secret"
	GitSecretMountPath   = "/etc/git-secret"
	GitSyncContainerName = "git-sync"
)
View Source
const (
	ResourceCodeDruid     = "dr"
	ResourceKindDruid     = "Druid"
	ResourceSingularDruid = "druid"
	ResourcePluralDruid   = "druids"
)
View Source
const (
	ResourceCodeElasticsearch     = "es"
	ResourceKindElasticsearch     = "Elasticsearch"
	ResourceSingularElasticsearch = "elasticsearch"
	ResourcePluralElasticsearch   = "elasticsearches"
)
View Source
const (
	ResourceCodeEtcd     = "etc"
	ResourceKindEtcd     = "Etcd"
	ResourceSingularEtcd = "etcd"
	ResourcePluralEtcd   = "etcds"
)
View Source
const (
	ResourceCodeFerretDB     = "fr"
	ResourceKindFerretDB     = "FerretDB"
	ResourceSingularFerretDB = "ferretdb"
	ResourcePluralFerretDB   = "ferretdbs"
)
View Source
const (
	ResourceCodeKafka     = "kf"
	ResourceKindKafka     = "Kafka"
	ResourceSingularKafka = "kafka"
	ResourcePluralKafka   = "kafkas"
)
View Source
const (
	ResourceCodeMariaDB     = "md"
	ResourceKindMariaDB     = "MariaDB"
	ResourceSingularMariaDB = "mariadb"
	ResourcePluralMariaDB   = "mariadbs"
)
View Source
const (
	ResourceCodeMemcached     = "mc"
	ResourceKindMemcached     = "Memcached"
	ResourceSingularMemcached = "memcached"
	ResourcePluralMemcached   = "memcacheds"
)
View Source
const (
	TLSCAKeyFileName    = "ca.key"
	TLSCACertFileName   = "ca.crt"
	MongoPemFileName    = "mongo.pem"
	MongoClientFileName = "client.pem"
	MongoCertDirectory  = "/var/run/mongodb/tls"

	MongoDBShardLabelKey  = "mongodb.kubedb.com/node.shard"
	MongoDBConfigLabelKey = "mongodb.kubedb.com/node.config"
	MongoDBMongosLabelKey = "mongodb.kubedb.com/node.mongos"
	MongoDBTypeLabelKey   = "mongodb.kubedb.com/node.type"

	MongoDBShardAffinityTemplateVar = "SHARD_INDEX"
)
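
These label keys distinguish MongoDB node types when a cluster is sharded. A small sketch selecting the pods of one shard; the shard name and the key/value pairing are hypothetical, and labels is k8s.io/apimachinery/pkg/labels:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	api "kubedb.dev/apimachinery/apis/kubedb/v1alpha2"
)

func main() {
	// Select pods of the (hypothetical) shard "mg-sharded-shard0".
	sel := labels.SelectorFromSet(labels.Set{
		api.MongoDBShardLabelKey: "mg-sharded-shard0",
		api.MongoDBTypeLabelKey:  api.NodeTypeShard,
	})
	fmt.Println("pod selector:", sel.String())
}
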
View Source
const (
	ResourceCodeMongoDB     = "mg"
	ResourceKindMongoDB     = "MongoDB"
	ResourceSingularMongoDB = "mongodb"
	ResourcePluralMongoDB   = "mongodbs"
)
View Source
const (
	ResourceCodeMySQL     = "my"
	ResourceKindMySQL     = "MySQL"
	ResourceSingularMySQL = "mysql"
	ResourcePluralMySQL   = "mysqls"
)
View Source
const (
	ResourceCodePerconaXtraDB     = "px"
	ResourceKindPerconaXtraDB     = "PerconaXtraDB"
	ResourceSingularPerconaXtraDB = "perconaxtradb"
	ResourcePluralPerconaXtraDB   = "perconaxtradbs"
)
View Source
const (
	ResourceCodePgBouncer     = "pb"
	ResourceKindPgBouncer     = "PgBouncer"
	ResourceSingularPgBouncer = "pgbouncer"
	ResourcePluralPgBouncer   = "pgbouncers"
)
View Source
const (
	ResourceCodePgpool     = "pp"
	ResourceKindPgpool     = "Pgpool"
	ResourceSingularPgpool = "pgpool"
	ResourcePluralPgpool   = "pgpools"
)
View Source
const (
	ResourceCodePostgres     = "pg"
	ResourceKindPostgres     = "Postgres"
	ResourceSingularPostgres = "postgres"
	ResourcePluralPostgres   = "postgreses"
)
View Source
const (
	ResourceCodeProxySQL     = "prx"
	ResourceKindProxySQL     = "ProxySQL"
	ResourceSingularProxySQL = "proxysql"
	ResourcePluralProxySQL   = "proxysqls"
)
View Source
const (
	ResourceCodeRabbitmq     = "rm"
	ResourceKindRabbitmq     = "RabbitMQ"
	ResourceSingularRabbitmq = "rabbitmq"
	ResourcePluralRabbitmq   = "rabbitmqs"
)
View Source
const (
	ResourceCodeRedisSentinel     = "rds"
	ResourceKindRedisSentinel     = "RedisSentinel"
	ResourceSingularRedisSentinel = "redissentinel"
	ResourcePluralRedisSentinel   = "redissentinels"
)
View Source
const (
	ResourceCodeRedis     = "rd"
	ResourceKindRedis     = "Redis"
	ResourceSingularRedis = "redis"
	ResourcePluralRedis   = "redises"
)
View Source
const (
	ResourceCodeSinglestore     = "sdb"
	ResourceKindSinglestore     = "Singlestore"
	ResourceSingularSinglestore = "singlestore"
	ResourcePluralSinglestore   = "singlestores"
)
View Source
const (
	ResourceCodeZooKeeper     = "zk"
	ResourceKindZooKeeper     = "ZooKeeper"
	ResourceSingularZooKeeper = "zookeeper"
	ResourcePluralZooKeeper   = "zookeepers"
)
View Source
const (
	ElasticsearchNodeAffinityTemplateVar = "NODE_ROLE"
)
View Source
const (
	RedisShardAffinityTemplateVar = "SHARD_INDEX"
)

Variables

View Source
var (
	DefaultInitContainerResource = core.ResourceRequirements{
		Requests: core.ResourceList{
			core.ResourceCPU:    resource.MustParse(".200"),
			core.ResourceMemory: resource.MustParse("256Mi"),
		},
		Limits: core.ResourceList{
			core.ResourceMemory: resource.MustParse("512Mi"),
		},
	}
	DefaultResources = core.ResourceRequirements{
		Requests: core.ResourceList{
			core.ResourceCPU:    resource.MustParse(".500"),
			core.ResourceMemory: resource.MustParse("1024Mi"),
		},
		Limits: core.ResourceList{
			core.ResourceMemory: resource.MustParse("1024Mi"),
		},
	}
	// CoordinatorDefaultResources must be used for raft backed coordinators to avoid unintended leader switches
	CoordinatorDefaultResources = core.ResourceRequirements{
		Requests: core.ResourceList{
			core.ResourceCPU:    resource.MustParse(".200"),
			core.ResourceMemory: resource.MustParse("256Mi"),
		},
		Limits: core.ResourceList{
			core.ResourceMemory: resource.MustParse("256Mi"),
		},
	}

	// DefaultResourcesCPUIntensive is for MongoDB versions >= 6
	DefaultResourcesCPUIntensive = core.ResourceRequirements{
		Requests: core.ResourceList{
			core.ResourceCPU:    resource.MustParse(".800"),
			core.ResourceMemory: resource.MustParse("1024Mi"),
		},
		Limits: core.ResourceList{
			core.ResourceMemory: resource.MustParse("1024Mi"),
		},
	}

	// DefaultResourcesMemoryIntensive must be used for elasticsearch
	// to avoid OOMKILLED while deploying ES V8
	DefaultResourcesMemoryIntensive = core.ResourceRequirements{
		Requests: core.ResourceList{
			core.ResourceCPU:    resource.MustParse(".500"),
			core.ResourceMemory: resource.MustParse("1.5Gi"),
		},
		Limits: core.ResourceList{
			core.ResourceMemory: resource.MustParse("1.5Gi"),
		},
	}

	// DefaultResourcesCoreAndMemoryIntensiveSolr must be used for Solr
	DefaultResourcesCoreAndMemoryIntensiveSolr = core.ResourceRequirements{
		Requests: core.ResourceList{
			core.ResourceCPU:    resource.MustParse(".900"),
			core.ResourceMemory: resource.MustParse("2Gi"),
		},
		Limits: core.ResourceList{
			core.ResourceMemory: resource.MustParse("2Gi"),
		},
	}

	// DefaultResourcesMemoryIntensiveSDB must be used for Singlestore when enabled monitoring or version >= 8.5.x
	DefaultResourcesMemoryIntensiveSDB = core.ResourceRequirements{
		Requests: core.ResourceList{
			core.ResourceCPU:    resource.MustParse(".500"),
			core.ResourceMemory: resource.MustParse("2Gi"),
		},
		Limits: core.ResourceList{
			core.ResourceMemory: resource.MustParse("2Gi"),
		},
	}
)
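The defaulting flow for these resource profiles is straightforward: a reconciler keeps whatever requests/limits the user set and only falls back to one of the profiles above when the container spec is empty. A minimal sketch, assuming the package's canonical import path kubedb.dev/apimachinery/apis/kubedb/v1alpha2 and a hypothetical applyDefaultResources helper:

package main

import (
	"fmt"

	core "k8s.io/api/core/v1"
	api "kubedb.dev/apimachinery/apis/kubedb/v1alpha2"
)

// applyDefaultResources is a hypothetical helper: it preserves user-provided
// requests/limits and fills in the package default only when nothing is set.
func applyDefaultResources(c *core.Container) {
	if len(c.Resources.Requests) == 0 && len(c.Resources.Limits) == 0 {
		c.Resources = api.DefaultResources
	}
}

func main() {
	db := core.Container{Name: "database"}
	applyDefaultResources(&db)
	fmt.Println(db.Resources.Requests.Cpu()) // 500m
}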
View Source
var (
	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
	SchemeBuilder runtime.SchemeBuilder

	AddToScheme = localSchemeBuilder.AddToScheme
)
View Source
var (
	DefaultClient client.Client
)
View Source
var PgpoolReservedVolumes = []string{
	PgpoolConfigVolumeName,
}
View Source
var PgpoolReservedVolumesMountPaths = []string{
	PgpoolConfigSecretMountPath,
}
View Source
var SchemeGroupVersion = schema.GroupVersion{Group: kubedb.GroupName, Version: "v1alpha2"}
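To decode or create objects of this group-version with a typed client, the package's SchemeBuilder is wired into a runtime scheme via AddToScheme. A minimal sketch, assuming the canonical import path kubedb.dev/apimachinery/apis/kubedb/v1alpha2:

package main

import (
	"k8s.io/apimachinery/pkg/runtime"
	api "kubedb.dev/apimachinery/apis/kubedb/v1alpha2"
)

func newScheme() (*runtime.Scheme, error) {
	scheme := runtime.NewScheme()
	// AddToScheme registers every type in this package (Druid, Elasticsearch,
	// MongoDB, ...) under SchemeGroupVersion (kubedb.com/v1alpha2).
	if err := api.AddToScheme(scheme); err != nil {
		return nil, err
	}
	return scheme, nil
}

func main() {
	if _, err := newScheme(); err != nil {
		panic(err)
	}
}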

Functions

func DefaultArbiter added in v0.38.0

func DefaultArbiter(computeOnly bool) core.ResourceRequirements

func FerretDBValidateEnvVar added in v0.41.0

func FerretDBValidateEnvVar(envs []core.EnvVar, forbiddenEnvs []string, resourceType string) error

func GetDatabasePods added in v0.24.0

func GetDatabasePods(db metav1.Object, stsLister appslister.StatefulSetLister, pods []core.Pod) ([]core.Pod, error)

func GetDatabasePodsByPetSetLister added in v0.44.0

func GetDatabasePodsByPetSetLister(db metav1.Object, psLister pslister.PetSetLister, pods []core.Pod) ([]core.Pod, error)

func GetServiceTemplate added in v0.15.0

func GetServiceTemplate(templates []NamedServiceTemplateSpec, alias ServiceAlias) ofst.ServiceTemplateSpec

GetServiceTemplate returns the serviceTemplate referred to by "alias" from the given list. If no template matches the alias, it returns an empty ServiceTemplateSpec.

func GetSharedBufferSizeForPostgres added in v0.19.0

func GetSharedBufferSizeForPostgres(resource *resource.Quantity) string

GetSharedBufferSizeForPostgres takes a memory quantity in bytes and returns 25% of the input, in bytes.
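For example, a pod with 4Gi of memory yields roughly 1Gi (expressed in bytes) for Postgres' shared_buffers. A small usage sketch, assuming the canonical import path:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	api "kubedb.dev/apimachinery/apis/kubedb/v1alpha2"
)

func main() {
	// 25% of 4Gi, returned in bytes, suitable for Postgres' shared_buffers.
	mem := resource.MustParse("4Gi")
	fmt.Println(api.GetSharedBufferSizeForPostgres(&mem))
}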

func HasServiceTemplate added in v0.15.0

func HasServiceTemplate(templates []NamedServiceTemplateSpec, alias ServiceAlias) bool

HasServiceTemplate returns "true" if a serviceTemplate for the given "alias" is present in the serviceTemplate list. Otherwise, it returns "false".
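A typical lookup pairs the two helpers: probe with HasServiceTemplate, then fetch with GetServiceTemplate. A hedged sketch (the "stats" alias value and the embedded Annotations field access are assumptions):

package main

import (
	"fmt"

	api "kubedb.dev/apimachinery/apis/kubedb/v1alpha2"
)

// pickTemplate only applies a template when one exists for the alias,
// avoiding an accidental zero-valued spec.
func pickTemplate(templates []api.NamedServiceTemplateSpec, alias api.ServiceAlias) {
	if !api.HasServiceTemplate(templates, alias) {
		return
	}
	tpl := api.GetServiceTemplate(templates, alias)
	fmt.Printf("service %q annotations: %v\n", alias, tpl.Annotations)
}

func main() {
	pickTemplate(nil, api.ServiceAlias("stats")) // no-op: no templates given
}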

func Kind

func Kind(kind string) schema.GroupKind

Kind takes an unqualified kind and returns a Group qualified GroupKind

func MySQLExporterTLSArg added in v0.16.0

func MySQLExporterTLSArg() string

func MySQLRequireSSLArg added in v0.16.0

func MySQLRequireSSLArg() string

func PgpoolGetMainContainerEnvs added in v0.41.0

func PgpoolGetMainContainerEnvs(p *Pgpool) []core.EnvVar

func PgpoolValidateVersion added in v0.41.0

func PgpoolValidateVersion(p *Pgpool) error

func PgpoolValidateVolumes added in v0.41.0

func PgpoolValidateVolumes(p *Pgpool) error

func PgpoolValidateVolumesMountPaths added in v0.41.0

func PgpoolValidateVolumesMountPaths(podTemplate *ofst.PodTemplateSpec) error

func Resource

func Resource(resource string) schema.GroupResource

Resource takes an unqualified resource and returns a Group qualified GroupResource
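Both helpers simply qualify a bare name with this package's group (kubedb.com); they are handy when constructing NotFound errors or working with the discovery API. An illustrative snippet:

package main

import (
	"fmt"

	api "kubedb.dev/apimachinery/apis/kubedb/v1alpha2"
)

func main() {
	gk := api.Kind("MongoDB")      // GroupKind{Group: "kubedb.com", Kind: "MongoDB"}
	gr := api.Resource("mongodbs") // GroupResource{Group: "kubedb.com", Resource: "mongodbs"}
	fmt.Println(gk, gr)            // "MongoDB.kubedb.com mongodbs.kubedb.com"
}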

func SetDefaultClient added in v0.38.0

func SetDefaultClient(kc client.Client)
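SetDefaultClient seeds the package-level DefaultClient, typically once at operator startup. A minimal sketch using controller-runtime; the config loading shown is illustrative:

package main

import (
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/config"
	api "kubedb.dev/apimachinery/apis/kubedb/v1alpha2"
)

func main() {
	kc, err := client.New(config.GetConfigOrDie(), client.Options{})
	if err != nil {
		panic(err)
	}
	// Code elsewhere can now share this client through api.DefaultClient.
	api.SetDefaultClient(kc)
}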

func UsesAcmeIssuer added in v0.32.0

func UsesAcmeIssuer(kc client.Client, ns string, issuerRef core.TypedLocalObjectReference) (bool, error)

Types

type AddressType added in v0.18.0

type AddressType string

+kubebuilder:validation:Enum=DNS;IP;IPv4;IPv6

const (
	AddressTypeDNS AddressType = "DNS"
	// Uses spec.podIP as address for db pods.
	AddressTypeIP AddressType = "IP"
	// Uses first IPv4 address from spec.podIP, spec.podIPs fields as address for db pods.
	AddressTypeIPv4 AddressType = "IPv4"
	// Uses first IPv6 address from spec.podIP, spec.podIPs fields as address for db pods.
	AddressTypeIPv6 AddressType = "IPv6"
)

func (AddressType) IsIP added in v0.18.0

func (a AddressType) IsIP() bool
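IsIP reports whether the address type resolves through a pod IP rather than DNS. A hedged sketch of how a host might be derived from it (the DNS naming shown follows the usual <pod>.<governing-service>.<namespace>.svc convention and is an assumption):

package main

import (
	"fmt"

	api "kubedb.dev/apimachinery/apis/kubedb/v1alpha2"
)

// hostFor picks the pod IP for the IP-based address types and a headless
// service DNS name otherwise.
func hostFor(at api.AddressType, podName, svc, ns, podIP string) string {
	if at.IsIP() {
		return podIP
	}
	return fmt.Sprintf("%s.%s.%s.svc", podName, svc, ns)
}

func main() {
	fmt.Println(hostFor(api.AddressTypeDNS, "mongo-0", "mongo-pods", "demo", "10.0.0.7"))
	fmt.Println(hostFor(api.AddressTypeIP, "mongo-0", "mongo-pods", "demo", "10.0.0.7"))
}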

type Age added in v0.29.0

type Age struct {
	// Populated by Provisioner when authSecret is created or Ops Manager when authSecret is updated.
	LastUpdateTimestamp metav1.Time `json:"lastUpdateTimestamp,omitempty"`
}

func (*Age) DeepCopy added in v0.29.0

func (in *Age) DeepCopy() *Age

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Age.

func (*Age) DeepCopyInto added in v0.29.0

func (in *Age) DeepCopyInto(out *Age)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type AllowedConsumers added in v0.25.0

type AllowedConsumers struct {
	// Namespaces indicates the namespaces from which Consumers may be attached
	//
	// +optional
	// +kubebuilder:default={from: Same}
	Namespaces *ConsumerNamespaces `json:"namespaces,omitempty"`

	// Selector specifies a selector for consumers that are allowed to bind
	// to this database instance.
	//
	// +optional
	Selector *metav1.LabelSelector `json:"selector,omitempty"`
}

AllowedConsumers defines which consumers may refer to a database instance.

func (*AllowedConsumers) DeepCopy added in v0.25.0

func (in *AllowedConsumers) DeepCopy() *AllowedConsumers

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedConsumers.

func (*AllowedConsumers) DeepCopyInto added in v0.25.0

func (in *AllowedConsumers) DeepCopyInto(out *AllowedConsumers)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type ArbiterSpec added in v0.38.0

type ArbiterSpec struct {
	// Compute Resources required by the sidecar container.
	// +optional
	Resources core.ResourceRequirements `json:"resources,omitempty"`
	// NodeSelector is a selector which must be true for the pod to fit on a node.
	// Selector which must match a node's labels for the pod to be scheduled on that node.
	// More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
	// +optional
	// +mapType=atomic
	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
	// If specified, the pod's tolerations.
	// +optional
	Tolerations []core.Toleration `json:"tolerations,omitempty"`
}

func (*ArbiterSpec) DeepCopy added in v0.38.0

func (in *ArbiterSpec) DeepCopy() *ArbiterSpec

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArbiterSpec.

func (*ArbiterSpec) DeepCopyInto added in v0.38.0

func (in *ArbiterSpec) DeepCopyInto(out *ArbiterSpec)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type Archiver added in v0.38.0

type Archiver struct {
	// Pause is used to stop the archiver backup for the database
	// +optional
	Pause bool `json:"pause,omitempty"`
	// Ref is the name and namespace reference to the Archiver CR
	Ref kmapi.ObjectReference `json:"ref"`
}

func (*Archiver) DeepCopy added in v0.38.0

func (in *Archiver) DeepCopy() *Archiver

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Archiver.

func (*Archiver) DeepCopyInto added in v0.38.0

func (in *Archiver) DeepCopyInto(out *Archiver)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type ArchiverRecovery added in v0.38.0

type ArchiverRecovery struct {
	RecoveryTimestamp metav1.Time `json:"recoveryTimestamp"`
	// +optional
	EncryptionSecret *kmapi.ObjectReference `json:"encryptionSecret,omitempty"`
	// +optional
	ManifestRepository *kmapi.ObjectReference `json:"manifestRepository,omitempty"`

	// FullDBRepository means db restore + manifest restore
	FullDBRepository *kmapi.ObjectReference `json:"fullDBRepository,omitempty"`
}

func (*ArchiverRecovery) DeepCopy added in v0.38.0

func (in *ArchiverRecovery) DeepCopy() *ArchiverRecovery

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArchiverRecovery.

func (*ArchiverRecovery) DeepCopyInto added in v0.38.0

func (in *ArchiverRecovery) DeepCopyInto(out *ArchiverRecovery)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type AutoOpsSpec added in v0.28.0

type AutoOpsSpec struct {
	// Disabled specifies whether the ops-request recommendation generation will be disabled or not.
	// +optional
	Disabled bool `json:"disabled,omitempty"`
}

AutoOpsSpec defines the specifications of automatic ops-request recommendation generation

func (*AutoOpsSpec) DeepCopy added in v0.28.0

func (in *AutoOpsSpec) DeepCopy() *AutoOpsSpec

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoOpsSpec.

func (*AutoOpsSpec) DeepCopyInto added in v0.28.0

func (in *AutoOpsSpec) DeepCopyInto(out *AutoOpsSpec)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type ClusterAuthMode

type ClusterAuthMode string

ClusterAuthMode represents the clusterAuthMode of mongodb clusters (replicaset or sharding) ref: https://docs.mongodb.com/manual/reference/program/mongod/#cmdoption-mongod-clusterauthmode +kubebuilder:validation:Enum=keyFile;sendKeyFile;sendX509;x509

const (
	// ClusterAuthModeKeyFile represents `keyFile` mongodb clusterAuthMode. In this mode, a keyfile is used for authentication and only keyfiles are accepted.
	ClusterAuthModeKeyFile ClusterAuthMode = "keyFile"

	// ClusterAuthModeSendKeyFile represents `sendKeyFile` mongodb clusterAuthMode.
	// This mode is for rolling upgrade purposes. Send a keyfile for authentication but can accept both keyfiles
	// and x.509 certificates.
	ClusterAuthModeSendKeyFile ClusterAuthMode = "sendKeyFile"

	// ClusterAuthModeSendX509 represents `sendx509` mongodb clusterAuthMode. This mode is usually for rolling upgrade purposes.
	// Send the x.509 certificate for authentication but can accept both keyfiles and x.509 certificates.
	ClusterAuthModeSendX509 ClusterAuthMode = "sendX509"

	// ClusterAuthModeX509 represents `x509` mongodb clusterAuthMode. This is the recommended clusterAuthMode.
	// Send the x.509 certificate for authentication and accept only x.509 certificates.
	ClusterAuthModeX509 ClusterAuthMode = "x509"
)

type ConnectionPoolConfig

type ConnectionPoolConfig struct {
	// Port is the port number on which PgBouncer listens to clients. Default: 5432.
	// +kubebuilder:default=5432
	// +optional
	Port *int32 `json:"port,omitempty"`
	// PoolMode is the pooling mechanism type. Default: session.
	// +kubebuilder:default="session"
	// +optional
	PoolMode string `json:"poolMode,omitempty"`
	// MaxClientConnections is the maximum number of allowed client connections. Default: 100.
	// +kubebuilder:default=100
	// +optional
	MaxClientConnections *int64 `json:"maxClientConnections,omitempty"`
	// DefaultPoolSize specifies how many server connections to allow per user/database pair. Default: 20.
	// +kubebuilder:default=20
	// +optional
	DefaultPoolSize *int64 `json:"defaultPoolSize,omitempty"`
	// MinPoolSize is used to add more server connections to pool if below this number. Default: 0 (disabled).
	// +kubebuilder:default=0
	// +optional
	MinPoolSize *int64 `json:"minPoolSize,omitempty"`
	// ReservePoolSize specifies how many additional connections to allow to a pool. 0 disables. Default: 0 (disabled).
	// +kubebuilder:default=0
	// +optional
	ReservePoolSize *int64 `json:"reservePoolSize,omitempty"`
	// ReservePoolTimeoutSeconds is the number of seconds after which, if a client has not been serviced,
	// pgbouncer enables the use of additional connections from the reserve pool. 0 disables. Default: 5.0.
	// +kubebuilder:default=5
	// +optional
	ReservePoolTimeoutSeconds *int64 `json:"reservePoolTimeoutSeconds,omitempty"`
	// MaxDBConnections is the maximum number of connections allowed per-database. Default: 0 (unlimited).
	// +kubebuilder:default=0
	// +optional
	MaxDBConnections *int64 `json:"maxDBConnections,omitempty"`
	// MaxUserConnections is the maximum number of connections allowed per user. Default: 0 (unlimited).
	// +kubebuilder:default=0
	// +optional
	MaxUserConnections *int64 `json:"maxUserConnections,omitempty"`
	// StatsPeriodSeconds sets how often the averages shown in various SHOW commands are updated
	// and how often aggregated statistics are written to the log. Default: 60
	// +kubebuilder:default=60
	// +optional
	StatsPeriodSeconds *int64 `json:"statsPeriodSeconds,omitempty"`
	// AuthType specifies how to authenticate users. Default: md5 (md5+plain text).
	// +kubebuilder:default=md5
	// +optional
	AuthType PgBouncerClientAuthMode `json:"authType,omitempty"`
	// IgnoreStartupParameters specifies comma-separated startup parameters that
	// pgbouncer knows are handled by the admin and can therefore ignore. Default: empty
	// +kubebuilder:default="empty"
	// +optional
	IgnoreStartupParameters string `json:"ignoreStartupParameters,omitempty"`
}
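Since most fields are optional pointers with kubebuilder defaults, a spec usually sets only what deviates from the defaults. A hedged construction sketch (pointer helpers from k8s.io/utils/pointer; the transaction pool mode is just an example value):

package main

import (
	"k8s.io/utils/pointer"
	api "kubedb.dev/apimachinery/apis/kubedb/v1alpha2"
)

func main() {
	// Only PoolMode deviates from the documented defaults here; omitted
	// pointer fields are filled in by defaulting.
	_ = api.ConnectionPoolConfig{
		Port:            pointer.Int32(5432),
		PoolMode:        "transaction",
		DefaultPoolSize: pointer.Int64(20),
		AuthType:        api.PgBouncerClientAuthMode("md5"),
	}
}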

func (*ConnectionPoolConfig) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionPoolConfig.

func (*ConnectionPoolConfig) DeepCopyInto

func (in *ConnectionPoolConfig) DeepCopyInto(out *ConnectionPoolConfig)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type ConsumerNamespaces added in v0.25.0

type ConsumerNamespaces struct {
	// From indicates where Consumers will be selected for the database instance. Possible
	// values are:
	// * All: Consumers in all namespaces.
	// * Selector: Consumers in namespaces selected by the selector
	// * Same: Only Consumers in the same namespace
	//
	// +optional
	// +kubebuilder:default=Same
	From *FromNamespaces `json:"from,omitempty"`

	// Selector must be specified when From is set to "Selector". In that case,
	// only Consumers in Namespaces matching this Selector will be selected by the
	// database instance. This field is ignored for other values of "From".
	//
	// +optional
	Selector *metav1.LabelSelector `json:"selector,omitempty"`
}

ConsumerNamespaces indicates which namespaces Consumers should be selected from.

func (*ConsumerNamespaces) DeepCopy added in v0.25.0

func (in *ConsumerNamespaces) DeepCopy() *ConsumerNamespaces

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsumerNamespaces.

func (*ConsumerNamespaces) DeepCopyInto added in v0.25.0

func (in *ConsumerNamespaces) DeepCopyInto(out *ConsumerNamespaces)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type CoordinatorSpec added in v0.21.0

type CoordinatorSpec struct {
	// Compute Resources required by coordinator container.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
	// +optional
	Resources core.ResourceRequirements `json:"resources,omitempty"`

	// Security options the coordinator container should run with.
	// More info: https://kubernetes.io/docs/concepts/policy/security-context/
	// More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
	// +optional
	SecurityContext *core.SecurityContext `json:"securityContext,omitempty"`
}

CoordinatorSpec defines attributes of the coordinator container

func (*CoordinatorSpec) DeepCopy added in v0.21.0

func (in *CoordinatorSpec) DeepCopy() *CoordinatorSpec

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoordinatorSpec.

func (*CoordinatorSpec) DeepCopyInto added in v0.21.0

func (in *CoordinatorSpec) DeepCopyInto(out *CoordinatorSpec)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type DatabasePhase

type DatabasePhase string

+kubebuilder:validation:Enum=Provisioning;DataRestoring;Ready;Critical;NotReady;Halted;Unknown

const (
	// used for Databases that are currently provisioning
	DatabasePhaseProvisioning DatabasePhase = "Provisioning"
	// used for Databases for which data is currently restoring
	DatabasePhaseDataRestoring DatabasePhase = "DataRestoring"
	// used for Databases that are currently ReplicaReady, AcceptingConnection and Ready
	DatabasePhaseReady DatabasePhase = "Ready"
	// used for Databases that can accept connections but have ReplicaReady == false || Ready == false (e.g., Elasticsearch yellow status)
	DatabasePhaseCritical DatabasePhase = "Critical"
	// used for Databases that can't connect
	DatabasePhaseNotReady DatabasePhase = "NotReady"
	// used for Databases that are halted
	DatabasePhaseHalted DatabasePhase = "Halted"
	// used for Databases for which Phase can't be calculated
	DatabasePhaseUnknown DatabasePhase = "Unknown"
)

type Databases

type Databases struct {
	// Alias to uniquely identify a target database running inside a specific Postgres instance.
	Alias string `json:"alias"`
	// DatabaseRef specifies the database appbinding reference in any namespace.
	DatabaseRef appcat.AppReference `json:"databaseRef"`
	// DatabaseName is the name of the target database inside a Postgres instance.
	DatabaseName string `json:"databaseName"`
}

func (*Databases) DeepCopy

func (in *Databases) DeepCopy() *Databases

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Databases.

func (*Databases) DeepCopyInto

func (in *Databases) DeepCopyInto(out *Databases)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type DeepStorageSpec added in v0.41.0

type DeepStorageSpec struct {
	// Specifies the storage type to be used by druid
	// Possible values: s3, google, azure, hdfs
	Type DruidDeepStorageType `json:"type"`

	// deepStorage.configSecret should contain the necessary data
	// to connect to the deep storage
	// +optional
	ConfigSecret *core.LocalObjectReference `json:"configSecret,omitempty"`
}

func (*DeepStorageSpec) DeepCopy added in v0.41.0

func (in *DeepStorageSpec) DeepCopy() *DeepStorageSpec

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeepStorageSpec.

func (*DeepStorageSpec) DeepCopyInto added in v0.41.0

func (in *DeepStorageSpec) DeepCopyInto(out *DeepStorageSpec)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type Druid added in v0.41.0

type Druid struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   DruidSpec   `json:"spec,omitempty"`
	Status DruidStatus `json:"status,omitempty"`
}

+kubebuilder:object:root=true +kubebuilder:subresource:status +kubebuilder:resource:shortName=dr,scope=Namespaced +kubebuilder:printcolumn:name="Type",type="string",JSONPath=".apiVersion" +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version" +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase" +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"

func (*Druid) AddDruidExtensionLoadList added in v0.41.0

func (d *Druid) AddDruidExtensionLoadList(druidExtensionLoadList string, extension string) string

func (*Druid) AppBindingMeta added in v0.41.0

func (d *Druid) AppBindingMeta() appcat.AppBindingMeta

func (*Druid) BrokersServiceName added in v0.41.0

func (d *Druid) BrokersServiceName() string

func (*Druid) ConfigSecretName added in v0.41.0

func (d *Druid) ConfigSecretName() string

func (*Druid) CoordinatorsServiceName added in v0.41.0

func (d *Druid) CoordinatorsServiceName() string

func (*Druid) CustomResourceDefinition added in v0.41.0

func (d *Druid) CustomResourceDefinition() *apiextensions.CustomResourceDefinition

func (*Druid) DeepCopy added in v0.41.0

func (in *Druid) DeepCopy() *Druid

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Druid.

func (*Druid) DeepCopyInto added in v0.41.0

func (in *Druid) DeepCopyInto(out *Druid)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

func (*Druid) DeepCopyObject added in v0.41.0

func (in *Druid) DeepCopyObject() runtime.Object

DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.

func (*Druid) Default added in v0.41.0

func (d *Druid) Default()

Default implements webhook.Defaulter so a webhook will be registered for the type

func (*Druid) DefaultUserCredSecretName added in v0.41.0

func (d *Druid) DefaultUserCredSecretName(username string) string

func (*Druid) DruidNodeContainerPort added in v0.41.0

func (d *Druid) DruidNodeContainerPort(nodeRole DruidNodeRoleType) int32

func (*Druid) DruidNodeRoleString added in v0.41.0

func (d *Druid) DruidNodeRoleString(nodeRole DruidNodeRoleType) string

func (*Druid) DruidNodeRoleStringSingular added in v0.41.0

func (d *Druid) DruidNodeRoleStringSingular(nodeRole DruidNodeRoleType) string

func (*Druid) Finalizer added in v0.44.0

func (r *Druid) Finalizer() string

func (*Druid) GetConnectionScheme added in v0.41.0

func (d *Druid) GetConnectionScheme() string

func (*Druid) GetDruidSegmentCacheConfig added in v0.41.0

func (d *Druid) GetDruidSegmentCacheConfig() string

func (*Druid) GetDruidStorageSize added in v0.41.0

func (d *Druid) GetDruidStorageSize(storageSize string) string

func (*Druid) GetMetadataStorageConnectURI added in v0.41.0

func (d *Druid) GetMetadataStorageConnectURI(appbinding *appcat.AppBinding, metadataStorageType DruidMetadataStorageType) string

func (*Druid) GetMetadataStorageType added in v0.41.0

func (d *Druid) GetMetadataStorageType(metadataStorage string) DruidMetadataStorageType

func (*Druid) GetPersistentSecrets added in v0.41.0

func (d *Druid) GetPersistentSecrets() []string

func (*Druid) GetZKServiceHost added in v0.41.0

func (d *Druid) GetZKServiceHost(appbinding *appcat.AppBinding) string

func (*Druid) GoverningServiceName added in v0.41.0

func (d *Druid) GoverningServiceName() string

func (*Druid) OffShootLabels added in v0.41.0

func (d *Druid) OffShootLabels() map[string]string

func (*Druid) OffShootName added in v0.41.0

func (d *Druid) OffShootName() string

func (*Druid) OffShootSelectors added in v0.41.0

func (d *Druid) OffShootSelectors(extraSelectors ...map[string]string) map[string]string

func (Druid) OffshootLabels added in v0.41.0

func (d Druid) OffshootLabels() map[string]string

func (*Druid) OffshootSelectors added in v0.41.0

func (d *Druid) OffshootSelectors(extraSelectors ...map[string]string) map[string]string

func (*Druid) OverlordsServiceName added in v0.41.0

func (d *Druid) OverlordsServiceName() string

func (*Druid) Owner added in v0.41.0

func (d *Druid) Owner() *meta.OwnerReference

func (*Druid) PVCName added in v0.41.0

func (d *Druid) PVCName(alias string) string

func (*Druid) PetSetName added in v0.44.0

func (d *Druid) PetSetName(nodeRole DruidNodeRoleType) string

func (*Druid) PodControllerLabels added in v0.41.0

func (d *Druid) PodControllerLabels(extraLabels ...map[string]string) map[string]string

func (*Druid) PodLabels added in v0.41.0

func (d *Druid) PodLabels(extraLebels ...map[string]string) map[string]string

func (*Druid) ReplicasAreReady added in v0.41.0

func (d *Druid) ReplicasAreReady(lister pslister.PetSetLister) (bool, string, error)

func (*Druid) ResourceFQN added in v0.41.0

func (d *Druid) ResourceFQN() string

func (*Druid) ResourceKind added in v0.41.0

func (d *Druid) ResourceKind() string

func (*Druid) ResourcePlural added in v0.41.0

func (d *Druid) ResourcePlural() string

func (*Druid) ResourceSingular added in v0.41.0

func (d *Druid) ResourceSingular() string

func (*Druid) RoutersServiceName added in v0.41.0

func (d *Druid) RoutersServiceName() string

func (*Druid) ServiceAccountName added in v0.41.0

func (d *Druid) ServiceAccountName() string

func (*Druid) ServiceLabels added in v0.41.0

func (d *Druid) ServiceLabels(alias ServiceAlias, extraLabels ...map[string]string) map[string]string

func (*Druid) ServiceName added in v0.41.0

func (d *Druid) ServiceName() string

func (*Druid) SetDefaults added in v0.41.0

func (d *Druid) SetDefaults()

func (*Druid) SetHealthCheckerDefaults added in v0.41.0

func (d *Druid) SetHealthCheckerDefaults()

func (*Druid) StatsService added in v0.44.0

func (d *Druid) StatsService() mona.StatsAccessor

func (*Druid) StatsServiceLabels added in v0.44.0

func (d *Druid) StatsServiceLabels() map[string]string

func (*Druid) ValidateCreate added in v0.41.0

func (d *Druid) ValidateCreate() (admission.Warnings, error)

ValidateCreate implements webhook.Validator so a webhook will be registered for the type

func (*Druid) ValidateDelete added in v0.41.0

func (d *Druid) ValidateDelete() (admission.Warnings, error)

ValidateDelete implements webhook.Validator so a webhook will be registered for the type

func (*Druid) ValidateUpdate added in v0.41.0

func (d *Druid) ValidateUpdate(old runtime.Object) (admission.Warnings, error)

ValidateUpdate implements webhook.Validator so a webhook will be registered for the type

type DruidApp added in v0.41.0

type DruidApp struct {
	*Druid
}

func (*DruidApp) DeepCopy added in v0.41.0

func (in *DruidApp) DeepCopy() *DruidApp

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DruidApp.

func (*DruidApp) DeepCopyInto added in v0.41.0

func (in *DruidApp) DeepCopyInto(out *DruidApp)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

func (DruidApp) Name added in v0.41.0

func (d DruidApp) Name() string

func (DruidApp) Type added in v0.41.0

func (d DruidApp) Type() appcat.AppType

type DruidClusterTopology added in v0.41.0

type DruidClusterTopology struct {
	Coordinators *DruidNode `json:"coordinators"`
	// +optional
	Overlords *DruidNode `json:"overlords,omitempty"`

	MiddleManagers *DruidNode `json:"middleManagers"`

	Historicals *DruidNode `json:"historicals"`

	Brokers *DruidNode `json:"brokers"`
	// +optional
	Routers *DruidNode `json:"routers,omitempty"`
}

func (*DruidClusterTopology) DeepCopy added in v0.41.0

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DruidClusterTopology.

func (*DruidClusterTopology) DeepCopyInto added in v0.41.0

func (in *DruidClusterTopology) DeepCopyInto(out *DruidClusterTopology)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type DruidDeepStorageType added in v0.41.0

type DruidDeepStorageType string

+kubebuilder:validation:Enum=s3;google;azure;hdfs

const (
	DruidDeepStorageS3     DruidDeepStorageType = "s3"
	DruidDeepStorageGoogle DruidDeepStorageType = "google"
	DruidDeepStorageAzure  DruidDeepStorageType = "azure"
	DruidDeepStorageHDFS   DruidDeepStorageType = "hdfs"
)

type DruidList added in v0.41.0

type DruidList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []Druid `json:"items"`
}

DruidList contains a list of Druid

func (*DruidList) DeepCopy added in v0.41.0

func (in *DruidList) DeepCopy() *DruidList

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DruidList.

func (*DruidList) DeepCopyInto added in v0.41.0

func (in *DruidList) DeepCopyInto(out *DruidList)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

func (*DruidList) DeepCopyObject added in v0.41.0

func (in *DruidList) DeepCopyObject() runtime.Object

DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.

type DruidMetadataStorageType added in v0.41.0

type DruidMetadataStorageType string

+kubebuilder:validation:Enum=MySQL;PostgreSQL

const (
	DruidMetadataStorageMySQL      DruidMetadataStorageType = "MySQL"
	DruidMetadataStoragePostgreSQL DruidMetadataStorageType = "PostgreSQL"
)

type DruidNode added in v0.41.0

type DruidNode struct {
	// Replicas represents the number of replicas for the specific type of node
	// +optional
	Replicas *int32 `json:"replicas,omitempty"`

	// Suffix to append to the node name
	// +optional
	Suffix string `json:"suffix,omitempty"`

	// Storage to specify how storage shall be used.
	// +optional
	Storage *core.PersistentVolumeClaimSpec `json:"storage,omitempty"`

	// PodTemplate is an optional configuration for pods used to expose database
	// +optional
	PodTemplate ofst.PodTemplateSpec `json:"podTemplate,omitempty"`

	// NodeSelector is a selector which must be true for the pod to fit on a node.
	// Selector which must match a node's labels for the pod to be scheduled on that node.
	// More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
	// +optional
	// +mapType=atomic
	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
	// If specified, the pod's tolerations.
	// +optional
	Tolerations []core.Toleration `json:"tolerations,omitempty"`

	// PodPlacementPolicy is the reference of the podPlacementPolicy
	// +kubebuilder:default={name: "default"}
	// +optional
	PodPlacementPolicy *core.LocalObjectReference `json:"podPlacementPolicy,omitempty"`
}

func (*DruidNode) DeepCopy added in v0.41.0

func (in *DruidNode) DeepCopy() *DruidNode

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DruidNode.

func (*DruidNode) DeepCopyInto added in v0.41.0

func (in *DruidNode) DeepCopyInto(out *DruidNode)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type DruidNodeRoleType added in v0.41.0

type DruidNodeRoleType string

+kubebuilder:validation:Enum=coordinators;overlords;brokers;routers;middleManagers;historicals

const (
	DruidNodeRoleCoordinators   DruidNodeRoleType = "coordinators"
	DruidNodeRoleOverlords      DruidNodeRoleType = "overlords"
	DruidNodeRoleBrokers        DruidNodeRoleType = "brokers"
	DruidNodeRoleRouters        DruidNodeRoleType = "routers"
	DruidNodeRoleMiddleManagers DruidNodeRoleType = "middleManagers"
	DruidNodeRoleHistoricals    DruidNodeRoleType = "historicals"
)

type DruidPhase added in v0.41.0

type DruidPhase string

+kubebuilder:validation:Enum=Provisioning;Ready;NotReady;Critical

const (
	DruidPhaseProvisioning DruidPhase = "Provisioning"
	DruidPhaseReady        DruidPhase = "Ready"
	DruidPhaseNotReady     DruidPhase = "NotReady"
	DruidPhaseCritical     DruidPhase = "Critical"
)

type DruidSpec added in v0.41.0

type DruidSpec struct {

	// Version of Druid to be deployed.
	Version string `json:"version"`

	// Druid topology for node specification
	// +optional
	Topology *DruidClusterTopology `json:"topology,omitempty"`

	// StorageType can be durable (default) or ephemeral.
	StorageType StorageType `json:"storageType,omitempty"`

	// DisableSecurity disables authentication security for users.
	// If unset, default is false
	// +optional
	DisableSecurity *bool `json:"disableSecurity,omitempty"`

	// Database authentication secret
	// +optional
	AuthSecret *core.LocalObjectReference `json:"authSecret,omitempty"`

	// ConfigSecret is an optional field to provide custom configuration file for database (i.e. config.properties).
	// If specified, this file will be used as configuration file otherwise default configuration file will be used.
	// +optional
	ConfigSecret *core.LocalObjectReference `json:"configSecret,omitempty"`

	// MetadataStorage contains information for Druid to connect to external dependency metadata storage
	MetadataStorage *MetadataStorage `json:"metadataStorage"`

	// DeepStorage contains specification for druid to connect to the deep storage
	DeepStorage *DeepStorageSpec `json:"deepStorage"`

	// ZookeeperRef contains information for Druid to connect to its external ZooKeeper dependency
	// +optional
	ZookeeperRef *ZookeeperRef `json:"zookeeperRef,omitempty"`

	// PodTemplate is an optional configuration
	// +optional
	PodTemplate ofst.PodTemplateSpec `json:"podTemplate,omitempty"`

	// ServiceTemplates is an optional configuration for services used to expose database
	// +optional
	ServiceTemplates []NamedServiceTemplateSpec `json:"serviceTemplates,omitempty"`

	// Indicates that the database is halted and all offshoot Kubernetes resources except PVCs are deleted.
	// +optional
	Halted bool `json:"halted,omitempty"`

	// Monitor is used to monitor the database instance
	// +optional
	Monitor *mona.AgentSpec `json:"monitor,omitempty"`

	// TerminationPolicy controls the delete operation for database
	// +optional
	TerminationPolicy TerminationPolicy `json:"terminationPolicy,omitempty"`

	// HealthChecker defines attributes of the health checker
	// +optional
	// +kubebuilder:default={periodSeconds: 30, timeoutSeconds: 10, failureThreshold: 3}
	HealthChecker kmapi.HealthCheckSpec `json:"healthChecker"`
}

DruidSpec defines the desired state of Druid
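A minimal construction sketch for a Druid object, assuming the canonical import path; the version string and the StorageTypeDurable constant name are assumptions, and the required MetadataStorage (plus optional ZookeeperRef) wiring is elided for brevity:

package main

import (
	core "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	api "kubedb.dev/apimachinery/apis/kubedb/v1alpha2"
)

func minimalDruid() *api.Druid {
	return &api.Druid{
		ObjectMeta: metav1.ObjectMeta{Name: "druid-quickstart", Namespace: "demo"},
		Spec: api.DruidSpec{
			Version:     "28.0.1", // assumed catalog version
			StorageType: api.StorageTypeDurable,
			DeepStorage: &api.DeepStorageSpec{
				Type:         api.DruidDeepStorageS3,
				ConfigSecret: &core.LocalObjectReference{Name: "deep-storage-config"},
			},
			// MetadataStorage must also be set in practice; omitted here for brevity.
		},
	}
}

func main() { _ = minimalDruid() }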

func (*DruidSpec) DeepCopy added in v0.41.0

func (in *DruidSpec) DeepCopy() *DruidSpec

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DruidSpec.

func (*DruidSpec) DeepCopyInto added in v0.41.0

func (in *DruidSpec) DeepCopyInto(out *DruidSpec)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type DruidStatsService added in v0.44.0

type DruidStatsService struct {
	*Druid
}

func (*DruidStatsService) DeepCopy added in v0.44.0

func (in *DruidStatsService) DeepCopy() *DruidStatsService

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DruidStatsService.

func (*DruidStatsService) DeepCopyInto added in v0.44.0

func (in *DruidStatsService) DeepCopyInto(out *DruidStatsService)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

func (DruidStatsService) GetNamespace added in v0.44.0

func (ks DruidStatsService) GetNamespace() string

func (DruidStatsService) Path added in v0.44.0

func (ks DruidStatsService) Path() string

func (DruidStatsService) Scheme added in v0.44.0

func (ks DruidStatsService) Scheme() string

func (DruidStatsService) ServiceMonitorAdditionalLabels added in v0.44.0

func (ks DruidStatsService) ServiceMonitorAdditionalLabels() map[string]string

func (DruidStatsService) ServiceMonitorName added in v0.44.0

func (ks DruidStatsService) ServiceMonitorName() string

func (DruidStatsService) ServiceName added in v0.44.0

func (ks DruidStatsService) ServiceName() string

func (DruidStatsService) TLSConfig added in v0.44.0

func (ks DruidStatsService) TLSConfig() *promapi.TLSConfig

type DruidStatus added in v0.41.0

type DruidStatus struct {
	// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
	// Important: Run "make" to regenerate code after modifying this file
	// Specifies the current phase of the database
	// +optional
	Phase DruidPhase `json:"phase,omitempty"`
	// observedGeneration is the most recent generation observed for this resource. It corresponds to the
	// resource's generation, which is updated on mutation by the API Server.
	// +optional
	ObservedGeneration int64 `json:"observedGeneration,omitempty"`
	// Conditions applied to the database, such as approval or denial.
	// +optional
	Conditions []kmapi.Condition `json:"conditions,omitempty"`
	// +optional
	Gateway *Gateway `json:"gateway,omitempty"`
}

DruidStatus defines the observed state of Druid

func (*DruidStatus) DeepCopy added in v0.41.0

func (in *DruidStatus) DeepCopy() *DruidStatus

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DruidStatus.

func (*DruidStatus) DeepCopyInto added in v0.41.0

func (in *DruidStatus) DeepCopyInto(out *DruidStatus)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type Elasticsearch

type Elasticsearch struct {
	metav1.TypeMeta   `json:",inline,omitempty"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	Spec              ElasticsearchSpec   `json:"spec,omitempty"`
	Status            ElasticsearchStatus `json:"status,omitempty"`
}

+kubebuilder:object:root=true +kubebuilder:resource:path=elasticsearches,singular=elasticsearch,shortName=es,categories={datastore,kubedb,appscode,all} +kubebuilder:subresource:status +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version" +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase" +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"

func (Elasticsearch) AppBindingMeta

func (e Elasticsearch) AppBindingMeta() appcat.AppBindingMeta

func (*Elasticsearch) AsOwner added in v0.35.0

func (e *Elasticsearch) AsOwner() *metav1.OwnerReference

func (*Elasticsearch) CertSecretVolumeMountPath

func (e *Elasticsearch) CertSecretVolumeMountPath(configDir string, alias ElasticsearchCertificateAlias) string

returns the mountPath for certificate secrets. If configDir is "/usr/share/elasticsearch/config", the mountPath will be "/usr/share/elasticsearch/config/certs/<alias>".

func (*Elasticsearch) CertSecretVolumeName

func (e *Elasticsearch) CertSecretVolumeName(alias ElasticsearchCertificateAlias) string

returns the volume name for a certificate secret. Values look like transport-certs, http-certs, etc.

func (*Elasticsearch) CertificateName

func (e *Elasticsearch) CertificateName(alias ElasticsearchCertificateAlias) string

CertificateName returns the default certificate name and/or certificate secret name for a certificate alias

func (*Elasticsearch) ClientCertificateCN

func (e *Elasticsearch) ClientCertificateCN(alias ElasticsearchCertificateAlias) string

ClientCertificateCN returns the CN for a client certificate

func (*Elasticsearch) CombinedStatefulSetName added in v0.15.2

func (e *Elasticsearch) CombinedStatefulSetName() string

func (*Elasticsearch) ConfigSecretName

func (e *Elasticsearch) ConfigSecretName() string

returns the secret name for the default elasticsearch configuration

func (*Elasticsearch) CoordinatingStatefulSetName added in v0.19.0

func (e *Elasticsearch) CoordinatingStatefulSetName() string

func (Elasticsearch) CustomResourceDefinition

func (_ Elasticsearch) CustomResourceDefinition() *apiextensions.CustomResourceDefinition

func (*Elasticsearch) DataColdStatefulSetName added in v0.19.0

func (e *Elasticsearch) DataColdStatefulSetName() string

func (*Elasticsearch) DataContentStatefulSetName added in v0.19.0

func (e *Elasticsearch) DataContentStatefulSetName() string

func (*Elasticsearch) DataFrozenStatefulSetName added in v0.19.0

func (e *Elasticsearch) DataFrozenStatefulSetName() string

func (*Elasticsearch) DataHotStatefulSetName added in v0.19.0

func (e *Elasticsearch) DataHotStatefulSetName() string

func (Elasticsearch) DataSelectors added in v0.16.0

func (e Elasticsearch) DataSelectors() map[string]string

func (*Elasticsearch) DataStatefulSetName added in v0.15.2

func (e *Elasticsearch) DataStatefulSetName() string

func (*Elasticsearch) DataWarmStatefulSetName added in v0.19.0

func (e *Elasticsearch) DataWarmStatefulSetName() string

func (*Elasticsearch) DeepCopy

func (in *Elasticsearch) DeepCopy() *Elasticsearch

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Elasticsearch.

func (*Elasticsearch) DeepCopyInto

func (in *Elasticsearch) DeepCopyInto(out *Elasticsearch)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

func (*Elasticsearch) DeepCopyObject

func (in *Elasticsearch) DeepCopyObject() runtime.Object

DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.

func (*Elasticsearch) DefaultUserCredSecretName added in v0.17.1

func (e *Elasticsearch) DefaultUserCredSecretName(userName string) string

returns the default secret name for the user credentials (i.e. username, password). If the username contains an underscore (_), it is replaced by a hyphen (-) to follow the Kubernetes naming convention.

func (Elasticsearch) GetAuthSecretName added in v0.29.0

func (e Elasticsearch) GetAuthSecretName() string

func (*Elasticsearch) GetCertSecretName added in v0.18.0

func (e *Elasticsearch) GetCertSecretName(alias ElasticsearchCertificateAlias) string

GetCertSecretName returns the secret name for a certificate alias if any, otherwise returns default certificate secret name for the given alias.

func (*Elasticsearch) GetConnectionScheme

func (e *Elasticsearch) GetConnectionScheme() string

func (*Elasticsearch) GetConnectionURL

func (e *Elasticsearch) GetConnectionURL() string

func (*Elasticsearch) GetMatchExpressions

func (e *Elasticsearch) GetMatchExpressions() []metav1.LabelSelectorRequirement

func (*Elasticsearch) GetPersistentSecrets

func (e *Elasticsearch) GetPersistentSecrets() []string

func (*Elasticsearch) GetUserCredSecretName added in v0.17.1

func (e *Elasticsearch) GetUserCredSecretName(username string) (string, error)

Returns the secret name for the given user. Returns an error if the secret name is missing.

func (Elasticsearch) GoverningServiceName

func (e Elasticsearch) GoverningServiceName() string

func (Elasticsearch) IngestSelectors added in v0.16.0

func (e Elasticsearch) IngestSelectors() map[string]string

func (*Elasticsearch) IngestStatefulSetName added in v0.15.2

func (e *Elasticsearch) IngestStatefulSetName() string

func (*Elasticsearch) InitialMasterNodes added in v0.17.0

func (e *Elasticsearch) InitialMasterNodes() []string

func (*Elasticsearch) MLStatefulSetName added in v0.19.0

func (e *Elasticsearch) MLStatefulSetName() string

func (*Elasticsearch) MasterDiscoveryServiceName added in v0.15.0

func (e *Elasticsearch) MasterDiscoveryServiceName() string

func (Elasticsearch) MasterSelectors added in v0.16.0

func (e Elasticsearch) MasterSelectors() map[string]string

func (*Elasticsearch) MasterStatefulSetName added in v0.15.2

func (e *Elasticsearch) MasterStatefulSetName() string

func (Elasticsearch) NodeRoleSpecificLabelKey added in v0.19.0

func (e Elasticsearch) NodeRoleSpecificLabelKey(roleType ElasticsearchNodeRoleType) string

func (Elasticsearch) NodeRoleSpecificSelectors added in v0.19.0

func (e Elasticsearch) NodeRoleSpecificSelectors(roleType ElasticsearchNodeRoleType) map[string]string

func (Elasticsearch) OffshootLabels

func (e Elasticsearch) OffshootLabels() map[string]string

func (Elasticsearch) OffshootName

func (e Elasticsearch) OffshootName() string

func (Elasticsearch) OffshootSelectors

func (e Elasticsearch) OffshootSelectors(extraSelectors ...map[string]string) map[string]string

func (Elasticsearch) PodControllerLabels added in v0.23.0

func (e Elasticsearch) PodControllerLabels(extraLabels ...map[string]string) map[string]string

func (Elasticsearch) PodLabels added in v0.23.0

func (e Elasticsearch) PodLabels(extraLabels ...map[string]string) map[string]string

func (*Elasticsearch) ReplicasAreReady

func (e *Elasticsearch) ReplicasAreReady(lister appslister.StatefulSetLister) (bool, string, error)

func (Elasticsearch) ResourceFQN added in v0.16.0

func (e Elasticsearch) ResourceFQN() string

func (Elasticsearch) ResourceKind

func (e Elasticsearch) ResourceKind() string

func (Elasticsearch) ResourcePlural

func (e Elasticsearch) ResourcePlural() string

func (Elasticsearch) ResourceShortCode

func (e Elasticsearch) ResourceShortCode() string

func (Elasticsearch) ResourceSingular

func (e Elasticsearch) ResourceSingular() string

func (Elasticsearch) ServiceLabels added in v0.23.0

func (e Elasticsearch) ServiceLabels(alias ServiceAlias, extraLabels ...map[string]string) map[string]string

func (Elasticsearch) ServiceName

func (e Elasticsearch) ServiceName() string

func (*Elasticsearch) SetDefaults

func (e *Elasticsearch) SetDefaults(esVersion *catalog.ElasticsearchVersion, topology *core_util.Topology)

func (*Elasticsearch) SetHealthCheckerDefaults added in v0.28.0

func (e *Elasticsearch) SetHealthCheckerDefaults()

func (*Elasticsearch) SetMetricsExporterDefaults added in v0.38.0

func (e *Elasticsearch) SetMetricsExporterDefaults(esVersion *catalog.ElasticsearchVersion)

func (*Elasticsearch) SetTLSDefaults

func (e *Elasticsearch) SetTLSDefaults(esVersion *catalog.ElasticsearchVersion)

sets default TLS configuration (i.e. alias, secretName)

func (Elasticsearch) StatsService

func (e Elasticsearch) StatsService() mona.StatsAccessor

func (Elasticsearch) StatsServiceLabels

func (e Elasticsearch) StatsServiceLabels() map[string]string

func (*Elasticsearch) TransformStatefulSetName added in v0.19.0

func (e *Elasticsearch) TransformStatefulSetName() string

type ElasticsearchCertificateAlias

type ElasticsearchCertificateAlias string

+kubebuilder:validation:Enum=ca;transport;http;admin;client;archiver;metrics-exporter

const (
	ElasticsearchCACert              ElasticsearchCertificateAlias = "ca"
	ElasticsearchTransportCert       ElasticsearchCertificateAlias = "transport"
	ElasticsearchHTTPCert            ElasticsearchCertificateAlias = "http"
	ElasticsearchAdminCert           ElasticsearchCertificateAlias = "admin"
	ElasticsearchClientCert          ElasticsearchCertificateAlias = "client"
	ElasticsearchArchiverCert        ElasticsearchCertificateAlias = "archiver"
	ElasticsearchMetricsExporterCert ElasticsearchCertificateAlias = "metrics-exporter"
)

type ElasticsearchClusterTopology

type ElasticsearchClusterTopology struct {
	Master       ElasticsearchNode  `json:"master"`
	Ingest       ElasticsearchNode  `json:"ingest"`
	Data         *ElasticsearchNode `json:"data,omitempty"`
	DataContent  *ElasticsearchNode `json:"dataContent,omitempty"`
	DataHot      *ElasticsearchNode `json:"dataHot,omitempty"`
	DataWarm     *ElasticsearchNode `json:"dataWarm,omitempty"`
	DataCold     *ElasticsearchNode `json:"dataCold,omitempty"`
	DataFrozen   *ElasticsearchNode `json:"dataFrozen,omitempty"`
	ML           *ElasticsearchNode `json:"ml,omitempty"`
	Transform    *ElasticsearchNode `json:"transform,omitempty"`
	Coordinating *ElasticsearchNode `json:"coordinating,omitempty"`
}

func (*ElasticsearchClusterTopology) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchClusterTopology.

func (*ElasticsearchClusterTopology) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

func (*ElasticsearchClusterTopology) ToMap added in v0.19.0

ToMap returns the ClusterTopology as a map

type ElasticsearchInternalUser

type ElasticsearchInternalUser string
const (
	ElasticsearchInternalUserElastic              ElasticsearchInternalUser = "elastic"
	ElasticsearchInternalUserAdmin                ElasticsearchInternalUser = "admin"
	ElasticsearchInternalUserKibanaserver         ElasticsearchInternalUser = "kibanaserver"
	ElasticsearchInternalUserKibanaSystem         ElasticsearchInternalUser = "kibana_system"
	ElasticsearchInternalUserLogstashSystem       ElasticsearchInternalUser = "logstash_system"
	ElasticsearchInternalUserBeatsSystem          ElasticsearchInternalUser = "beats_system"
	ElasticsearchInternalUserApmSystem            ElasticsearchInternalUser = "apm_system"
	ElasticsearchInternalUserRemoteMonitoringUser ElasticsearchInternalUser = "remote_monitoring_user"
	ElasticsearchInternalUserKibanaro             ElasticsearchInternalUser = "kibanaro"
	ElasticsearchInternalUserLogstash             ElasticsearchInternalUser = "logstash"
	ElasticsearchInternalUserReadall              ElasticsearchInternalUser = "readall"
	ElasticsearchInternalUserSnapshotrestore      ElasticsearchInternalUser = "snapshotrestore"
	ElasticsearchInternalUserMetricsExporter      ElasticsearchInternalUser = "metrics_exporter"
)

type ElasticsearchList

type ElasticsearchList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	// Items is a list of Elasticsearch CRD objects
	Items []Elasticsearch `json:"items,omitempty"`
}

func (*ElasticsearchList) DeepCopy

func (in *ElasticsearchList) DeepCopy() *ElasticsearchList

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchList.

func (*ElasticsearchList) DeepCopyInto

func (in *ElasticsearchList) DeepCopyInto(out *ElasticsearchList)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

func (*ElasticsearchList) DeepCopyObject

func (in *ElasticsearchList) DeepCopyObject() runtime.Object

DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.

type ElasticsearchNode

type ElasticsearchNode struct {
	// Replicas represents the number of replicas for this specific type of node
	// +optional
	Replicas *int32 `json:"replicas,omitempty"`
	// +optional
	Suffix string `json:"suffix,omitempty"`
	// HeapSizePercentage specifies both the initial heap allocation (-Xms) percentage and the maximum heap allocation (-Xmx) percentage.
	// Node level values have higher precedence than global values.
	// +optional
	HeapSizePercentage *int32 `json:"heapSizePercentage,omitempty"`
	// Storage to specify how storage shall be used.
	// +optional
	Storage *core.PersistentVolumeClaimSpec `json:"storage,omitempty"`
	// Compute Resources required by the sidecar container.
	// +optional
	Resources core.ResourceRequirements `json:"resources,omitempty"`
	// An eviction is allowed if at most "maxUnavailable" pods selected by
	// "selector" are unavailable after the eviction, i.e. even in absence of
	// the evicted pod. For example, one can prevent all voluntary evictions
	// by specifying 0. This is a mutually exclusive setting with "minAvailable".
	// +optional
	MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"`

	// NodeSelector is a selector which must be true for the pod to fit on a node.
	// Selector which must match a node's labels for the pod to be scheduled on that node.
	// More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
	// +optional
	// +mapType=atomic
	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
	// If specified, the pod's tolerations.
	// +optional
	Tolerations []core.Toleration `json:"tolerations,omitempty"`
}

func (*ElasticsearchNode) DeepCopy

func (in *ElasticsearchNode) DeepCopy() *ElasticsearchNode

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchNode.

func (*ElasticsearchNode) DeepCopyInto

func (in *ElasticsearchNode) DeepCopyInto(out *ElasticsearchNode)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type ElasticsearchNodeRoleType added in v0.19.0

type ElasticsearchNodeRoleType string
const (
	ElasticsearchNodeRoleTypeCombined            ElasticsearchNodeRoleType = "combined"
	ElasticsearchNodeRoleTypeMaster              ElasticsearchNodeRoleType = "master"
	ElasticsearchNodeRoleTypeData                ElasticsearchNodeRoleType = "data"
	ElasticsearchNodeRoleTypeDataContent         ElasticsearchNodeRoleType = "data-content"
	ElasticsearchNodeRoleTypeDataHot             ElasticsearchNodeRoleType = "data-hot"
	ElasticsearchNodeRoleTypeDataWarm            ElasticsearchNodeRoleType = "data-warm"
	ElasticsearchNodeRoleTypeDataCold            ElasticsearchNodeRoleType = "data-cold"
	ElasticsearchNodeRoleTypeDataFrozen          ElasticsearchNodeRoleType = "data-frozen"
	ElasticsearchNodeRoleTypeIngest              ElasticsearchNodeRoleType = "ingest"
	ElasticsearchNodeRoleTypeML                  ElasticsearchNodeRoleType = "ml"
	ElasticsearchNodeRoleTypeRemoteClusterClient ElasticsearchNodeRoleType = "remote-cluster-client"
	ElasticsearchNodeRoleTypeTransform           ElasticsearchNodeRoleType = "transform"
	ElasticsearchNodeRoleTypeCoordinating        ElasticsearchNodeRoleType = "coordinating"
)

type ElasticsearchRoleMapSpec

type ElasticsearchRoleMapSpec struct {
	// Specifies the reserved status.
	// Resources that have this set to true can’t be changed using the REST API or Kibana.
	// Defaults to "false".
	// +optional
	Reserved bool `json:"reserved,omitempty" yaml:"reserved,omitempty"`

	// Specifies the hidden status.
	// Resources that have this set to true are not returned by the REST API
	// and not visible in Kibana.
	// Defaults to "false".
	// +optional
	Hidden bool `json:"hidden,omitempty" yaml:"hidden,omitempty"`

	// Specifies a list of backend roles assigned to this role.
	// Backend roles can come from the internal user database,
	// LDAP groups, JSON web token claims or SAML assertions.
	// +optional
	BackendRoles []string `json:"backendRoles,omitempty" yaml:"backend_roles,omitempty"`

	// Specifies a list of hosts assigned to this role.
	// +optional
	Hosts []string `json:"hosts,omitempty" yaml:"hosts,omitempty"`

	// Specifies a list of users assigned to this role.
	// +optional
	Users []string `json:"users,omitempty" yaml:"users,omitempty"`

	// Specifies a list of backend roles (migrated from ES-version6) assigned to this role.
	AndBackendRoles []string `json:"andBackendRoles,omitempty" yaml:"and_backend_roles,omitempty"`
}

Specifies the role mapping structure. Both `json` and `yaml` tags are used in the struct metadata: the `json` tags (camel case) are used when taking input from users, while the `yaml` tags (snake case) are used by the operator to generate the roles_mapping.yml file.
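
The effect of the dual tags can be seen by marshaling the same value with encoding/json and a YAML library that honors yaml struct tags (gopkg.in/yaml.v2 here); the trimmed-down stand-in type below mirrors two of the real fields:

	import (
		"encoding/json"
		"fmt"

		yaml "gopkg.in/yaml.v2"
	)

	// roleMap is a stand-in carrying the same tag scheme as ElasticsearchRoleMapSpec.
	type roleMap struct {
		BackendRoles []string `json:"backendRoles,omitempty" yaml:"backend_roles,omitempty"`
		Users        []string `json:"users,omitempty" yaml:"users,omitempty"`
	}

	func main() {
		rm := roleMap{BackendRoles: []string{"admin"}, Users: []string{"alice"}}

		j, _ := json.Marshal(rm) // user-facing form: {"backendRoles":["admin"],"users":["alice"]}
		y, _ := yaml.Marshal(rm) // operator-facing form: backend_roles / users keys, as in roles_mapping.yml
		fmt.Println(string(j))
		fmt.Print(string(y))
	}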

func (*ElasticsearchRoleMapSpec) DeepCopy

func (in *ElasticsearchRoleMapSpec) DeepCopy() *ElasticsearchRoleMapSpec

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchRoleMapSpec.

func (*ElasticsearchRoleMapSpec) DeepCopyInto

func (in *ElasticsearchRoleMapSpec) DeepCopyInto(out *ElasticsearchRoleMapSpec)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type ElasticsearchSpec

type ElasticsearchSpec struct {
	// AutoOps contains configuration of automatic ops-request-recommendation generation
	// +optional
	AutoOps AutoOpsSpec `json:"autoOps,omitempty"`

	// Version of Elasticsearch to be deployed.
	Version string `json:"version"`

	// Number of instances to deploy for an Elasticsearch database.
	// +optional
	Replicas *int32 `json:"replicas,omitempty"`

	// Elasticsearch topology for node specification
	// +optional
	Topology *ElasticsearchClusterTopology `json:"topology,omitempty"`

	// To enable SSL for the http layer
	EnableSSL bool `json:"enableSSL,omitempty"`

	// DisableSecurity disables the security of the authPlugin (i.e., x-pack or searchguard),
	// turning off user authentication. If unset, defaults to false.
	// +optional
	DisableSecurity bool `json:"disableSecurity,omitempty"`

	// Database authentication secret
	// +optional
	AuthSecret *SecretReference `json:"authSecret,omitempty"`

	// StorageType can be durable (default) or ephemeral
	StorageType StorageType `json:"storageType,omitempty"`

	// Storage to specify how storage shall be used.
	Storage *core.PersistentVolumeClaimSpec `json:"storage,omitempty"`

	// Init is used to initialize the database
	// +optional
	Init *InitSpec `json:"init,omitempty"`

	// Monitor is used to monitor the database instance
	// +optional
	Monitor *mona.AgentSpec `json:"monitor,omitempty"`

	// ConfigSecret is an optional field to provide custom configuration file for database.
	// If specified, this file will be used as the configuration file; otherwise, the default configuration file will be used.
	// +optional
	ConfigSecret *core.LocalObjectReference `json:"configSecret,omitempty"`

	// SecureConfigSecret is an optional field to provide secure settings for database.
	//	- Ref: https://www.elastic.co/guide/en/elasticsearch/reference/7.14/secure-settings.html
	// Secure settings are stored in the "ES_CONFIG_DIR/elasticsearch.keystore" file
	// (contents are encrypted with the keystore password) once the keystore is created.
	// Expects a k8s secret name with data format:
	//	data:
	//		key: value
	//		password: KEYSTORE_PASSWORD
	//		s3.client.default.access_key: ACCESS_KEY
	//		s3.client.default.secret_key: SECRET_KEY
	// +optional
	SecureConfigSecret *core.LocalObjectReference `json:"secureConfigSecret,omitempty"`

	// PodTemplate is an optional configuration for the pods used to run the database
	// +optional
	PodTemplate ofst.PodTemplateSpec `json:"podTemplate,omitempty"`

	// ServiceTemplates is an optional configuration for the services used to expose the database
	// +optional
	ServiceTemplates []NamedServiceTemplateSpec `json:"serviceTemplates,omitempty"`

	// An eviction is allowed if at most "maxUnavailable" pods selected by
	// "selector" are unavailable after the eviction, i.e. even in absence of
	// the evicted pod. For example, one can prevent all voluntary evictions
	// by specifying 0. This is a mutually exclusive setting with "minAvailable".
	// +optional
	MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"`

	// TLS contains tls configurations
	// +optional
	TLS *kmapi.TLSConfig `json:"tls,omitempty"`

	// InternalUsers contains internal user configurations.
	// Expected Input format:
	// internalUsers:
	//   <username1>:
	//		...
	//   <username2>:
	//		...
	// +optional
	InternalUsers map[string]ElasticsearchUserSpec `json:"internalUsers,omitempty"`

	// RolesMapping contains roles mapping configurations.
	// Expected Input format:
	// rolesMapping:
	//   <role1>:
	//		...
	//   <role2>:
	//		...
	// +optional
	RolesMapping map[string]ElasticsearchRoleMapSpec `json:"rolesMapping,omitempty"`

	// Indicates that the database is halted and all offshoot Kubernetes resources except PVCs are deleted.
	// +optional
	Halted bool `json:"halted,omitempty"`

	// TerminationPolicy controls the delete operation for database
	// +optional
	TerminationPolicy TerminationPolicy `json:"terminationPolicy,omitempty"`

	// KernelSettings contains the additional kernel settings.
	// +optional
	KernelSettings *KernelSettings `json:"kernelSettings,omitempty"`

	// HeapSizePercentage specifies both the initial heap allocation (xms) percentage and the maximum heap allocation (xmx) percentage.
	// Elasticsearch bootstrap fails if -Xms and -Xmx are not equal:
	// Error: initial heap size [X] not equal to maximum heap size [Y]; this can cause resize pauses.
	// It is applied to all nodes; if a node-level `heapSizePercentage` is specified, this global value is overridden for that node.
	// It defaults to 50% of memory limit.
	// +optional
	// +kubebuilder:default=50
	HeapSizePercentage *int32 `json:"heapSizePercentage,omitempty"`

	// HealthChecker defines attributes of the health checker
	// +optional
	// +kubebuilder:default={periodSeconds: 10, timeoutSeconds: 10, failureThreshold: 1}
	HealthChecker kmapi.HealthCheckSpec `json:"healthChecker"`
}
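
A minimal sketch of an ElasticsearchSpec built from the fields above (the version string is illustrative and must match an available ElasticsearchVersion object; StorageTypeDurable is assumed to be this package's durable StorageType constant):

	import (
		"k8s.io/apimachinery/pkg/util/intstr"
		"k8s.io/utils/pointer"
	)

	func exampleSpec() ElasticsearchSpec {
		maxUnavailable := intstr.FromInt(1)
		// With a 2Gi memory limit, a 50% heap size yields -Xms1g -Xmx1g,
		// keeping the initial and maximum heap equal as bootstrap requires.
		heap := int32(50)

		return ElasticsearchSpec{
			Version:            "8.2.0", // illustrative; must match an ElasticsearchVersion
			Replicas:           pointer.Int32(3),
			StorageType:        StorageTypeDurable,
			EnableSSL:          true,
			MaxUnavailable:     &maxUnavailable,
			HeapSizePercentage: &heap,
		}
	}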

func (*ElasticsearchSpec) DeepCopy

func (in *ElasticsearchSpec) DeepCopy() *ElasticsearchSpec

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchSpec.

func (*ElasticsearchSpec) DeepCopyInto

func (in *ElasticsearchSpec) DeepCopyInto(out *ElasticsearchSpec)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type ElasticsearchStatus

type ElasticsearchStatus struct {
	// Specifies the current phase of the database
	// +optional
	Phase DatabasePhase `json:"phase,omitempty"`
	// observedGeneration is the most recent generation observed for this resource. It corresponds to the
	// resource's generation, which is updated on mutation by the API Server.
	// +optional
	ObservedGeneration int64 `json:"observedGeneration,omitempty"`
	// Conditions applied to the database, such as approval or denial.
	// +optional
	Conditions []kmapi.Condition `json:"conditions,omitempty"`
	// +optional
	AuthSecret *Age `json:"authSecret,omitempty"`
	// +optional
	Gateway *Gateway `json:"gateway,omitempty"`
}

func (*ElasticsearchStatus) DeepCopy

func (in *ElasticsearchStatus) DeepCopy() *ElasticsearchStatus

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchStatus.

func (*ElasticsearchStatus) DeepCopyInto

func (in *ElasticsearchStatus) DeepCopyInto(out *ElasticsearchStatus)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type ElasticsearchUserSpec

type ElasticsearchUserSpec struct {
	// Specifies the hash of the password.
	// +optional
	Hash string `json:"-" yaml:"hash,omitempty"`

	// Specifies the full name of the user.
	// Only applicable for the xpack authplugin.
	FullName string `json:"full_name,omitempty" yaml:"-"`

	// Specifies arbitrary metadata that you want to associate with the user.
	// Only applicable for the xpack authplugin.
	Metadata map[string]string `json:"metadata,omitempty" yaml:"-"`

	// Specifies the email of the user.
	// Only applicable for the xpack authplugin.
	Email string `json:"email,omitempty" yaml:"-"`

	// A set of roles the user has. The roles determine the user’s access permissions.
	// To create a user without any roles, specify an empty list: []
	// Only applicable for the xpack authplugin.
	Roles []string `json:"roles,omitempty" yaml:"-"`

	// Specifies the k8s secret name that holds the user credentials.
	// Defaults to "<resource-name>-<username>-cred".
	// +optional
	SecretName string `json:"secretName,omitempty" yaml:"-"`

	// Specifies the reserved status.
	// Resources that have this set to true can’t be changed using the REST API or Kibana.
	// Defaults to "false".
	// +optional
	Reserved bool `json:"reserved,omitempty" yaml:"reserved,omitempty"`

	// Specifies the hidden status.
	// Resources that have this set to true are not returned by the REST API
	// and not visible in Kibana.
	// Defaults to "false".
	// +optional
	Hidden bool `json:"hidden,omitempty" yaml:"hidden,omitempty"`

	// Specifies a list of backend roles assigned to this user.
	// Backend roles can come from the internal user database,
	// LDAP groups, JSON web token claims or SAML assertions.
	// +optional
	BackendRoles []string `json:"backendRoles,omitempty" yaml:"backend_roles,omitempty"`

	// Specifies a list of searchguard security plugin roles assigned to this user.
	// +optional
	SearchGuardRoles []string `json:"searchGuardRoles,omitempty" yaml:"search_guard_roles,omitempty"`

	// Specifies a list of opendistro security plugin roles assigned to this user.
	// +optional
	OpendistroSecurityRoles []string `json:"opendistroSecurityRoles,omitempty" yaml:"opendistro_security_roles,omitempty"`

	// Specifies one or more custom attributes,
	// which can be used in index names and DLS queries.
	// +optional
	Attributes map[string]string `json:"attributes,omitempty" yaml:"attributes,omitempty"`

	// Specifies the description of the user
	// +optional
	Description string `json:"description,omitempty" yaml:"description,omitempty"`
}

ElasticsearchUserSpec specifies the security plugin's internal user structure. Both `json` and `yaml` tags are used in the struct metadata: the `json` tags (camel case) are used when taking input from users, while the `yaml` tags (snake case) are used by the operator to generate the internal_users.yml file. For Elastic-Stack built-in users there are no yaml files; instead, the operator is responsible for creating and syncing the users. Fields that should not appear in the generated YAML carry an empty yaml tag ("-") so that they do not interfere with YAML generation for other distributions.
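
The "-" tags are what keep operator-only and user-facing fields on their respective sides; a trimmed-down stand-in type makes the behavior visible:

	import (
		"encoding/json"
		"fmt"

		yaml "gopkg.in/yaml.v2"
	)

	// userSpec is a stand-in carrying the same tag scheme as ElasticsearchUserSpec.
	type userSpec struct {
		Hash     string `json:"-" yaml:"hash,omitempty"`      // operator-only
		FullName string `json:"full_name,omitempty" yaml:"-"` // user input, xpack-only
		Reserved bool   `json:"reserved,omitempty" yaml:"reserved,omitempty"`
	}

	func main() {
		u := userSpec{Hash: "$2y$12$abc", FullName: "Jane Doe", Reserved: true}

		j, _ := json.Marshal(u) // {"full_name":"Jane Doe","reserved":true}; the hash is never exposed to users
		y, _ := yaml.Marshal(u) // hash + reserved only; full_name never reaches internal_users.yml
		fmt.Println(string(j))
		fmt.Print(string(y))
	}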

func (*ElasticsearchUserSpec) DeepCopy

func (in *ElasticsearchUserSpec) DeepCopy() *ElasticsearchUserSpec

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchUserSpec.

func (*ElasticsearchUserSpec) DeepCopyInto

func (in *ElasticsearchUserSpec) DeepCopyInto(out *ElasticsearchUserSpec)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type ErrantTransactionRecoveryPolicy added in v0.27.0

type ErrantTransactionRecoveryPolicy string
const (
	ErrantTransactionRecoveryPolicyClone             ErrantTransactionRecoveryPolicy = "Clone"
	ErrantTransactionRecoveryPolicyPseudoTransaction ErrantTransactionRecoveryPolicy = "PseudoTransaction"
)

type Etcd

type Etcd struct {
	metav1.TypeMeta   `json:",inline,omitempty"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	Spec              EtcdSpec   `json:"spec,omitempty"`
	Status            EtcdStatus `json:"status,omitempty"`
}

+kubebuilder:object:root=true
+kubebuilder:resource:path=etcds,singular=etcd,shortName=etc,categories={datastore,kubedb,appscode,all}
+kubebuilder:subresource:status
+kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version"
+kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase"
+kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"

func (Etcd) AppBindingMeta

func (r Etcd) AppBindingMeta() appcat.AppBindingMeta

func (*Etcd) AsOwner added in v0.35.0

func (e *Etcd) AsOwner() *metav1.OwnerReference

func (Etcd) ClientServiceName

func (e Etcd) ClientServiceName() string

func (Etcd) CustomResourceDefinition

func (_ Etcd) CustomResourceDefinition() *apiextensions.CustomResourceDefinition

func (*Etcd) DeepCopy

func (in *Etcd) DeepCopy() *Etcd

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Etcd.

func (*Etcd) DeepCopyInto

func (in *Etcd) DeepCopyInto(out *Etcd)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

func (*Etcd) DeepCopyObject

func (in *Etcd) DeepCopyObject() runtime.Object

DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.

func (Etcd) GetAuthSecretName added in v0.29.0

func (e Etcd) GetAuthSecretName() string

func (Etcd) OffshootLabels

func (e Etcd) OffshootLabels() map[string]string

func (Etcd) OffshootName

func (e Etcd) OffshootName() string

func (Etcd) OffshootSelectors

func (e Etcd) OffshootSelectors() map[string]string

func (Etcd) PeerServiceName

func (e Etcd) PeerServiceName() string

func (Etcd) PodControllerLabels added in v0.23.0

func (e Etcd) PodControllerLabels() map[string]string

func (Etcd) PodLabels added in v0.23.0

func (e Etcd) PodLabels() map[string]string

func (*Etcd) ReplicasAreReady

func (e *Etcd) ReplicasAreReady(lister appslister.StatefulSetLister) (bool, string, error)

func (Etcd) ResourceFQN added in v0.16.0

func (e Etcd) ResourceFQN() string

func (Etcd) ResourceKind

func (e Etcd) ResourceKind() string

func (Etcd) ResourcePlural

func (e Etcd) ResourcePlural() string

func (Etcd) ResourceShortCode

func (e Etcd) ResourceShortCode() string

func (Etcd) ResourceSingular

func (e Etcd) ResourceSingular() string

func (Etcd) ServiceLabels added in v0.23.0

func (e Etcd) ServiceLabels(alias ServiceAlias, extraLabels ...map[string]string) map[string]string

func (*Etcd) SetDefaults

func (e *Etcd) SetDefaults()

func (Etcd) StatsService

func (e Etcd) StatsService() mona.StatsAccessor

func (Etcd) StatsServiceLabels

func (e Etcd) StatsServiceLabels() map[string]string
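
A short sketch of how these helpers fit together (the object name is illustrative, and the comments describe the return values only loosely, since their exact formats are defined by the implementation):

	import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	func exampleEtcdHelpers() {
		e := Etcd{
			ObjectMeta: metav1.ObjectMeta{Name: "my-etcd", Namespace: "demo"},
		}
		e.SetDefaults() // fill defaulted spec fields in place before use

		_ = e.OffshootName()      // base name for offshoot resources (typically the object name)
		_ = e.ClientServiceName() // name of the client-facing Service
		_ = e.PeerServiceName()   // name of the peer Service used for cluster bootstrapping
		_ = e.OffshootSelectors() // labels selecting this database's pods
	}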

type EtcdList

type EtcdList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	// Items is a list of Etcd CRD objects
	Items []Etcd `json:"items,omitempty"`
}

func (*EtcdList) DeepCopy

func (in *EtcdList) DeepCopy() *EtcdList

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdList.

func (*EtcdList) DeepCopyInto

func (in *EtcdList) DeepCopyInto(out *EtcdList)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

func (*EtcdList) DeepCopyObject

func (in *EtcdList) DeepCopyObject() runtime.Object

DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.

type EtcdSpec

type EtcdSpec struct {
	// Version of Etcd to be deployed.
	Version string `json:"version"`

	// Number of instances to deploy for an Etcd database.
	Replicas *int32 `json:"replicas,omitempty"`

	// StorageType can be durable (default) or ephemeral
	StorageType StorageType `json:"storageType,omitempty"`

	// Storage spec to specify how storage shall be used.
	Storage *core.PersistentVolumeClaimSpec `json:"storage,omitempty"`

	// Database authentication secret
	// +optional
	AuthSecret *SecretReference `json:"authSecret,omitempty"`

	// Init is used to initialize the database
	// +optional
	Init *InitSpec `json:"init,omitempty"`

	// Monitor is used to monitor the database instance
	// +optional
	Monitor *mona.AgentSpec `json:"monitor,omitempty"`

	// etcd cluster TLS configuration
	TLS *TLSPolicy `json:"tls,omitempty"`

	// PodTemplate is an optional configuration for the pods used to run the database
	// +optional
	PodTemplate ofst.PodTemplateSpec `json:"podTemplate,omitempty"`

	// ServiceTemplates is an optional configuration for the services used to expose the database
	// +optional
	ServiceTemplates []NamedServiceTemplateSpec `json:"serviceTemplates,omitempty"`

	// Indicates that the database is halted and all offshoot Kubernetes resources except PVCs are deleted.
	// +optional
	Halted bool `json:"halted,omitempty"`

	// TerminationPolicy controls the delete operation for database
	// +optional
	TerminationPolicy TerminationPolicy `json:"terminationPolicy,omitempty"`
}
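
A minimal sketch of an EtcdSpec (the version string is illustrative and must match an available EtcdVersion object; StorageTypeDurable and TerminationPolicyWipeOut are assumed to be this package's StorageType and TerminationPolicy constants):

	import (
		core "k8s.io/api/core/v1"
		"k8s.io/utils/pointer"
	)

	func exampleEtcdSpec() EtcdSpec {
		return EtcdSpec{
			Version:     "3.5.0", // illustrative; must match an EtcdVersion object
			Replicas:    pointer.Int32(3),
			StorageType: StorageTypeDurable,
			Storage: &core.PersistentVolumeClaimSpec{
				AccessModes: []core.PersistentVolumeAccessMode{core.ReadWriteOnce},
			},
			// WipeOut removes all offshoot resources, including PVCs and
			// secrets, when the database object is deleted.
			TerminationPolicy: TerminationPolicyWipeOut,
		}
	}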

func (*EtcdSpec) DeepCopy

func (in *EtcdSpec) DeepCopy() *EtcdSpec

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdSpec.

func (*EtcdSpec) DeepCopyInto

func (in *EtcdSpec) DeepCopyInto(out *EtcdSpec)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

func (*EtcdSpec) GetPersistentSecrets

func (e *EtcdSpec) GetPersistentSecrets() []string

type EtcdStatus

type EtcdStatus struct {
	// Specifies the current phase of the database
	// +optional
	Phase DatabasePhase `json:"phase,omitempty"`
	// observedGeneration is the most recent generation observed for this resource. It corresponds to the
	// resource's generation, which is updated on mutation by the API Server.
	// +optional
	ObservedGeneration int64 `json:"observedGeneration,omitempty"`
	// Conditions applied to the database, such as approval or denial.
	// +optional
	Conditions []kmapi.Condition `json:"conditions,omitempty"`
	// +optional
	AuthSecret *Age `json:"authSecret,omitempty"`
	// +optional
	Gateway *Gateway `json:"gateway,omitempty"`
}

func (*EtcdStatus) DeepCopy

func (in *EtcdStatus) DeepCopy() *EtcdStatus

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdStatus.

func (*EtcdStatus) DeepCopyInto