v1alpha2

package
v0.46.0 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Jun 4, 2024 License: Apache-2.0 Imports: 52 Imported by: 86

Documentation

Overview

+k8s:deepcopy-gen=package,register +k8s:openapi-gen=true +k8s:defaulter-gen=TypeMeta +groupName=kubedb.com

nolint:goconst

Index

Constants

View Source
// Kind, singular, plural, and short-code identifiers for the ClickHouse resource.
const (
	ResourceKindClickHouse     = "ClickHouse" // Kubernetes Kind name
	ResourceSingularClickHouse = "clickhouse" // singular resource name
	ResourcePluralClickHouse   = "clickhouses" // plural resource name, used in API paths
	ResourceCodeClickHouse     = "ch" // kubectl short code
)
View Source
// Shared constants used across the KubeDB database controllers: label keys,
// container/volume names, default ports, file paths, and env variable names,
// grouped per database engine by the section banners below.
const (
	// Deprecated: retained only for backward compatibility.
	DatabaseNamePrefix = "kubedb"

	KubeDBOrganization = "kubedb"

	// Label keys derived from the API group name.
	LabelRole   = kubedb.GroupName + "/role"
	LabelPetSet = kubedb.GroupName + "/petset"

	ReplicationModeDetectorContainerName = "replication-mode-detector"
	DatabasePodPrimary                   = "primary"
	DatabasePodStandby                   = "standby"

	ComponentDatabase         = "database"
	ComponentConnectionPooler = "connection-pooler"
	RoleStats                 = "stats"
	DefaultStatsPath          = "/metrics"
	DefaultPasswordLength     = 16
	HealthCheckInterval       = 10 * time.Second

	ContainerExporterName = "exporter"
	LocalHost             = "localhost"
	LocalHostIP           = "127.0.0.1"

	DBCustomConfigName             = "custom-config"
	DefaultVolumeClaimTemplateName = "data"

	DBTLSVolume         = "tls-volume"
	DBExporterTLSVolume = "exporter-tls-volume"

	CACert = "ca.crt"

	// =========================== Database key Constants ============================
	// Fully-qualified per-database keys of the form "<singular>.<group>",
	// used as label/annotation key prefixes.
	PostgresKey      = ResourceSingularPostgres + "." + kubedb.GroupName
	ElasticsearchKey = ResourceSingularElasticsearch + "." + kubedb.GroupName
	MySQLKey         = ResourceSingularMySQL + "." + kubedb.GroupName
	MariaDBKey       = ResourceSingularMariaDB + "." + kubedb.GroupName
	PerconaXtraDBKey = ResourceSingularPerconaXtraDB + "." + kubedb.GroupName
	MongoDBKey       = ResourceSingularMongoDB + "." + kubedb.GroupName
	RedisKey         = ResourceSingularRedis + "." + kubedb.GroupName
	MemcachedKey     = ResourceSingularMemcached + "." + kubedb.GroupName
	EtcdKey          = ResourceSingularEtcd + "." + kubedb.GroupName
	ProxySQLKey      = ResourceSingularProxySQL + "." + kubedb.GroupName

	// =========================== Elasticsearch Constants ============================
	ElasticsearchRestPort                        = 9200
	ElasticsearchRestPortName                    = "http"
	ElasticsearchTransportPort                   = 9300
	ElasticsearchTransportPortName               = "transport"
	ElasticsearchPerformanceAnalyzerPort         = 9600
	ElasticsearchPerformanceAnalyzerPortName     = "analyzer"
	ElasticsearchNodeRoleSet                     = "set"
	ElasticsearchConfigDir                       = "/usr/share/elasticsearch/config"
	ElasticsearchOpenSearchConfigDir             = "/usr/share/opensearch/config"
	ElasticsearchSecureSettingsDir               = "/elasticsearch/secure-settings"
	ElasticsearchTempConfigDir                   = "/elasticsearch/temp-config"
	ElasticsearchCustomConfigDir                 = "/elasticsearch/custom-config"
	ElasticsearchDataDir                         = "/usr/share/elasticsearch/data"
	ElasticsearchOpenSearchDataDir               = "/usr/share/opensearch/data"
	ElasticsearchTempDir                         = "/tmp"
	ElasticsearchOpendistroSecurityConfigDir     = "/usr/share/elasticsearch/plugins/opendistro_security/securityconfig"
	ElasticsearchOpenSearchSecurityConfigDir     = "/usr/share/opensearch/plugins/opensearch-security/securityconfig"
	ElasticsearchOpenSearchSecurityConfigDirV2   = "/usr/share/opensearch/config/opensearch-security"
	// %v is substituted with the Search Guard plugin version.
	ElasticsearchSearchGuardSecurityConfigDir    = "/usr/share/elasticsearch/plugins/search-guard-%v/sgconfig"
	ElasticsearchOpendistroReadallMonitorRole    = "readall_and_monitor"
	ElasticsearchOpenSearchReadallMonitorRole    = "readall_and_monitor"
	ElasticsearchSearchGuardReadallMonitorRoleV7 = "SGS_READALL_AND_MONITOR"
	ElasticsearchSearchGuardReadallMonitorRoleV6 = "sg_readall_and_monitor"
	ElasticsearchStatusGreen                     = "green"
	ElasticsearchStatusYellow                    = "yellow"
	ElasticsearchStatusRed                       = "red"
	ElasticsearchInitSysctlContainerName         = "init-sysctl"
	ElasticsearchInitConfigMergerContainerName   = "config-merger"
	ElasticsearchContainerName                   = "elasticsearch"
	ElasticsearchExporterContainerName           = "exporter"
	ElasticsearchSearchGuardRolesMappingFileName = "sg_roles_mapping.yml"
	ElasticsearchSearchGuardInternalUserFileName = "sg_internal_users.yml"
	ElasticsearchOpendistroRolesMappingFileName  = "roles_mapping.yml"
	ElasticsearchOpendistroInternalUserFileName  = "internal_users.yml"
	ElasticsearchJavaOptsEnv                     = "ES_JAVA_OPTS"
	ElasticsearchOpenSearchJavaOptsEnv           = "OPENSEARCH_JAVA_OPTS"
	ElasticsearchVolumeConfig                    = "esconfig"
	ElasticsearchVolumeTempConfig                = "temp-config"
	ElasticsearchVolumeSecurityConfig            = "security-config"
	ElasticsearchVolumeSecureSettings            = "secure-settings"
	ElasticsearchVolumeCustomConfig              = "custom-config"
	ElasticsearchVolumeData                      = "data"
	ElasticsearchVolumeTemp                      = "temp"

	// JVM heap-size bounds, in bytes.
	// Ref:
	//	- https://www.elastic.co/guide/en/elasticsearch/reference/7.6/heap-size.html#heap-size
	//	- no more than 50% of your physical RAM
	//	- no more than 32GB that the JVM uses for compressed object pointers (compressed oops)
	//	- no more than 26GB for zero-based compressed oops;
	// 26 GB is safe on most systems
	ElasticsearchMaxHeapSize = 26 * 1024 * 1024 * 1024
	// 128MB
	ElasticsearchMinHeapSize = 128 * 1024 * 1024

	// =========================== Memcached Constants ============================
	MemcachedConfigKey              = "memcached.conf" // MemcachedConfigKey is the key used for custom Memcached configuration
	MemcachedDatabasePortName       = "db"
	MemcachedPrimaryServicePortName = "primary"
	MemcachedDatabasePort           = 11211
	MemcachedShardKey               = MemcachedKey + "/shard"
	MemcachedContainerName          = ResourceSingularMemcached
	MemcachedConfigVolumePath       = "/etc/memcached/"

	// =========================== MongoDB Constants ============================
	MongoDBDatabasePortName       = "db"
	MongoDBPrimaryServicePortName = "primary"
	MongoDBDatabasePort           = 27017
	MongoDBKeyFileSecretSuffix    = "-key"
	MongoDBRootUsername           = "root"
	MongoDBCustomConfigFile       = "mongod.conf"
	MongoDBReplicaSetConfig       = "replicaset.json"
	MongoDBConfigurationJSFile    = "configuration.js"
	// MongoDB topology node types.
	NodeTypeMongos                = "mongos"
	NodeTypeShard                 = "shard"
	NodeTypeConfig                = "configsvr"
	NodeTypeArbiter               = "arbiter"
	NodeTypeHidden                = "hidden"
	NodeTypeReplica               = "replica"
	NodeTypeStandalone            = "standalone"

	MongoDBWorkDirectoryName = "workdir"
	MongoDBWorkDirectoryPath = "/work-dir"

	MongoDBCertDirectoryName = "certdir"

	MongoDBDataDirectoryName = "datadir"
	MongoDBDataDirectoryPath = "/data/db"

	MongoDBInitInstallContainerName   = "copy-config"
	MongoDBInitBootstrapContainerName = "bootstrap"

	MongoDBConfigDirectoryName = "config"
	MongoDBConfigDirectoryPath = "/data/configdb"

	MongoDBInitialConfigDirectoryName = "configdir"
	MongoDBInitialConfigDirectoryPath = "/configdb-readonly"

	MongoDBInitScriptDirectoryName = "init-scripts"
	MongoDBInitScriptDirectoryPath = "/init-scripts"

	MongoDBInitialDirectoryName = "initial-script"
	MongoDBInitialDirectoryPath = "/docker-entrypoint-initdb.d"

	MongoDBClientCertDirectoryName = "client-cert"
	MongoDBClientCertDirectoryPath = "/client-cert"

	MongoDBServerCertDirectoryName = "server-cert"
	MongoDBServerCertDirectoryPath = "/server-cert"

	MongoDBInitialKeyDirectoryName = "keydir"
	MongoDBInitialKeyDirectoryPath = "/keydir-readonly"

	MongoDBContainerName = ResourceSingularMongoDB

	MongoDBDefaultVolumeClaimTemplateName = MongoDBDataDirectoryName

	MongodbUser             = "root"
	MongoDBKeyForKeyFile    = "key.txt"
	MongoDBAuthSecretSuffix = "-auth"

	// =========================== MySQL Constants ============================
	MySQLMetricsExporterConfigSecretSuffix = "metrics-exporter-config"
	MySQLDatabasePortName                  = "db"
	MySQLRouterReadWritePortName           = "rw"
	MySQLRouterReadOnlyPortName            = "ro"
	MySQLPrimaryServicePortName            = "primary"
	MySQLStandbyServicePortName            = "standby"
	MySQLDatabasePort                      = 3306
	MySQLRouterReadWritePort               = 6446
	MySQLRouterReadOnlyPort                = 6447

	MySQLCoordinatorClientPort = 2379
	MySQLCoordinatorPort       = 2380
	MySQLCoordinatorStatus     = "Coordinator/Status"

	MySQLGroupComPort    = 33060
	MySQLMaxGroupMembers = 9
	// The recommended MySQL server version for group replication (GR)
	MySQLGRRecommendedVersion = "8.0.23"
	MySQLDefaultGroupSize     = 3
	MySQLRootUserName         = "MYSQL_ROOT_USERNAME"
	MySQLRootPassword         = "MYSQL_ROOT_PASSWORD"
	MySQLName                 = "MYSQL_NAME"
	MySQLRootUser             = "root"

	// Accepted values for the MySQL exporter's TLS config mode.
	MySQLTLSConfigCustom     = "custom"
	MySQLTLSConfigSkipVerify = "skip-verify"
	MySQLTLSConfigTrue       = "true"
	MySQLTLSConfigFalse      = "false"
	MySQLTLSConfigPreferred  = "preferred"

	MySQLContainerName            = "mysql"
	MySQLRouterContainerName      = "mysql-router"
	MySQLRouterInitContainerName  = "mysql-router-init"
	MySQLCoordinatorContainerName = "mysql-coordinator"
	MySQLInitContainerName        = "mysql-init"

	MySQLRouterInitScriptDirectoryName = "init-scripts"
	MySQLRouterInitScriptDirectoryPath = "/scripts"
	MySQLRouterConfigDirectoryName     = "router-config-secret"
	MySQLRouterConfigDirectoryPath     = "/etc/mysqlrouter"
	MySQLRouterTLSDirectoryName        = "router-tls-volume"
	MySQLRouterTLSDirectoryPath        = "/etc/mysql/certs"
	MySQLReplicationUser               = "repl"

	MySQLComponentKey     = MySQLKey + "/component"
	MySQLComponentDB      = "database"
	MySQLComponentRouter  = "router"
	MySQLCustomConfigFile = "my-inline.cnf"

	MySQLVolumeNameTemp      = "tmp"
	MySQLVolumeMountPathTemp = "/tmp"

	MySQLVolumeNameData      = "data"
	MySQLVolumeMountPathData = "/var/lib/mysql"

	MySQLVolumeNameUserInitScript      = "initial-script"
	MySQLVolumeMountPathUserInitScript = "/docker-entrypoint-initdb.d"

	MySQLVolumeNameInitScript      = "init-scripts"
	MySQLVolumeMountPathInitScript = "/scripts"

	MySQLVolumeNameCustomConfig      = "custom-config"
	MySQLVolumeMountPathCustomConfig = "/etc/mysql/conf.d"

	MySQLVolumeNameTLS      = "tls-volume"
	MySQLVolumeMountPathTLS = "/etc/mysql/certs"

	MySQLVolumeNameExporterTLS      = "exporter-tls-volume"
	MySQLVolumeMountPathExporterTLS = "/etc/mysql/certs"

	MySQLVolumeNameSourceCA      = "source-ca"
	MySQLVolumeMountPathSourceCA = "/etc/mysql/server/certs"

	// =========================== PerconaXtraDB Constants ============================
	PerconaXtraDBClusterRecommendedVersion     = "5.7"
	PerconaXtraDBMaxClusterNameLength          = 32
	PerconaXtraDBStandaloneReplicas            = 1
	PerconaXtraDBDefaultClusterSize            = 3
	PerconaXtraDBDataMountPath                 = "/var/lib/mysql"
	PerconaXtraDBDataLostFoundPath             = PerconaXtraDBDataMountPath + "/lost+found"
	PerconaXtraDBInitDBVolumeName              = "initial-script"
	PerconaXtraDBInitDBMountPath               = "/docker-entrypoint-initdb.d"
	PerconaXtraDBCustomConfigMountPath         = "/etc/percona-server.conf.d/"
	PerconaXtraDBClusterCustomConfigMountPath  = "/etc/mysql/custom.conf.d/"
	PerconaXtraDBCustomConfigVolumeName        = "custom-config"
	PerconaXtraDBTLSConfigCustom               = "custom"
	PerconaXtraDBInitContainerName             = "px-init"
	PerconaXtraDBCoordinatorContainerName      = "px-coordinator"
	PerconaXtraDBRunScriptVolumeName           = "run-script"
	PerconaXtraDBRunScriptVolumeMountPath      = "/run-script"
	PerconaXtraDBInitScriptVolumeName          = "init-scripts"
	PerconaXtraDBInitScriptVolumeMountPath     = "/scripts"
	PerconaXtraDBContainerName                 = ResourceSingularPerconaXtraDB
	PerconaXtraDBCertMountPath                 = "/etc/mysql/certs"
	PerconaXtraDBExporterConfigFileName        = "exporter.cnf"
	PerconaXtraDBGaleraClusterPrimaryComponent = "Primary"
	PerconaXtraDBServerTLSVolumeName           = "tls-server-config"
	PerconaXtraDBClientTLSVolumeName           = "tls-client-config"
	PerconaXtraDBExporterTLSVolumeName         = "tls-metrics-exporter-config"
	PerconaXtraDBMetricsExporterTLSVolumeName  = "metrics-exporter-config"
	PerconaXtraDBMetricsExporterConfigPath     = "/etc/mysql/config/exporter"
	PerconaXtraDBDataVolumeName                = "data"
	PerconaXtraDBMySQLUserGroupID              = 1001

	// =========================== MariaDB Constants ============================
	MariaDBMaxClusterNameLength          = 32
	MariaDBStandaloneReplicas            = 1
	MariaDBDefaultClusterSize            = 3
	MariaDBDataMountPath                 = "/var/lib/mysql"
	MariaDBDataLostFoundPath             = MariaDBDataMountPath + "/lost+found"
	MariaDBInitDBVolumeName              = "initial-script"
	MariaDBInitDBMountPath               = "/docker-entrypoint-initdb.d"
	MariaDBCustomConfigMountPath         = "/etc/mysql/conf.d/"
	MariaDBClusterCustomConfigMountPath  = "/etc/mysql/custom.conf.d/"
	MariaDBCustomConfigVolumeName        = "custom-config"
	MariaDBTLSConfigCustom               = "custom"
	MariaDBInitContainerName             = "mariadb-init"
	MariaDBCoordinatorContainerName      = "md-coordinator"
	MariaDBRunScriptVolumeName           = "run-script"
	MariaDBRunScriptVolumeMountPath      = "/run-script"
	MariaDBInitScriptVolumeName          = "init-scripts"
	MariaDBInitScriptVolumeMountPath     = "/scripts"
	MariaDBContainerName                 = ResourceSingularMariaDB
	MariaDBCertMountPath                 = "/etc/mysql/certs"
	MariaDBExporterConfigFileName        = "exporter.cnf"
	MariaDBGaleraClusterPrimaryComponent = "Primary"
	MariaDBServerTLSVolumeName           = "tls-server-config"
	MariaDBClientTLSVolumeName           = "tls-client-config"
	MariaDBExporterTLSVolumeName         = "tls-metrics-exporter-config"
	MariaDBMetricsExporterTLSVolumeName  = "metrics-exporter-config"
	MariaDBMetricsExporterConfigPath     = "/etc/mysql/config/exporter"
	MariaDBDataVolumeName                = "data"

	// =========================== SingleStore Constants ============================
	SinglestoreDatabasePortName       = "db"
	SinglestorePrimaryServicePortName = "primary"
	SinglestoreStudioPortName         = "studio"

	SinglestoreDatabasePort = 3306
	SinglestoreStudioPort   = 8081
	SinglestoreExporterPort = 9104

	SinglestoreRootUserName = "ROOT_USERNAME"
	SinglestoreRootPassword = "ROOT_PASSWORD"
	SinglestoreRootUser     = "root"
	DatabasePodMaster       = "Master"
	DatabasePodAggregator   = "Aggregator"
	DatabasePodLeaf         = "Leaf"
	PetSetTypeAggregator    = "aggregator"
	PetSetTypeLeaf          = "leaf"
	PetSetTypeStandalone    = "standalone"

	SinglestoreDatabaseHealth = "singlestore_health"
	SinglestoreTableHealth    = "singlestore_health_table"

	SinglestoreCoordinatorContainerName = "singlestore-coordinator"
	SinglestoreContainerName            = "singlestore"
	SinglestoreInitContainerName        = "singlestore-init"

	SinglestoreVolumeNameUserInitScript      = "initial-script"
	SinglestoreVolumeMountPathUserInitScript = "/docker-entrypoint-initdb.d"
	SinglestoreVolumeNameCustomConfig        = "custom-config"
	SinglestoreVolumeMountPathCustomConfig   = "/etc/memsql/conf.d"
	// NOTE(review): "Volme" is a typo in the exported name; it cannot be
	// renamed without breaking callers.
	SinglestoreVolmeNameInitScript           = "init-scripts"
	SinglestoreVolumeMountPathInitScript     = "/scripts"
	SinglestoreVolumeNameData                = "data"
	SinglestoreVolumeMountPathData           = "/var/lib/memsql"
	SinglestoreVolumeNameTLS                 = "tls-volume"
	SinglestoreVolumeMountPathTLS            = "/etc/memsql/certs"

	SinglestoreTLSConfigCustom     = "custom"
	SinglestoreTLSConfigSkipVerify = "skip-verify"
	SinglestoreTLSConfigTrue       = "true"
	SinglestoreTLSConfigFalse      = "false"
	SinglestoreTLSConfigPreferred  = "preferred"

	// =========================== MSSQLServer Constants ============================
	MSSQLSAUser = "sa"

	AGPrimaryReplicaReadyCondition = "AGPrimaryReplicaReady"

	MSSQLDatabasePodPrimary       = "primary"
	MSSQLDatabasePodSecondary     = "secondary"
	MSSQLSecondaryServiceAlias    = "secondary"
	MSSQLSecondaryServicePortName = "secondary"

	// port related
	MSSQLDatabasePortName              = "db"
	MSSQLPrimaryServicePortName        = "primary"
	MSSQLDatabasePort                  = 1433
	MSSQLDatabaseMirroringEndpointPort = 5022
	MSSQLCoordinatorPort               = 2381

	// environment variables
	EnvAcceptEula        = "ACCEPT_EULA"
	EnvMSSQLEnableHADR   = "MSSQL_ENABLE_HADR"
	EnvMSSQLAgentEnabled = "MSSQL_AGENT_ENABLED"
	EnvMSSQLSAUsername   = "MSSQL_SA_USERNAME"
	EnvMSSQLSAPassword   = "MSSQL_SA_PASSWORD"

	// container related
	MSSQLContainerName            = "mssql"
	MSSQLCoordinatorContainerName = "mssql-coordinator"
	MSSQLInitContainerName        = "mssql-init"

	// volume related
	MSSQLVolumeNameData                        = "data"
	MSSQLVolumeMountPathData                   = "/var/opt/mssql"
	MSSQLVolumeNameInitScript                  = "init-scripts"
	MSSQLVolumeMountPathInitScript             = "/scripts"
	MSSQLVolumeNameEndpointCert                = "endpoint-cert"
	MSSQLVolumeMountPathEndpointCert           = "/var/opt/mssql/endpoint-cert"
	MSSQLVolumeNameCerts                       = "certs"
	MSSQLVolumeMountPathCerts                  = "/var/opt/mssql/certs"
	MSSQLVolumeNameTLS                         = "tls"
	MSSQLVolumeMountPathTLS                    = "/var/opt/mssql/tls"
	MSSQLVolumeNameSecurityCACertificates      = "security-ca-certificates"
	MSSQLVolumeMountPathSecurityCACertificates = "/var/opt/mssql/security/ca-certificates"
	MSSQLVolumeNameCACerts                     = "cacerts"
	MSSQLVolumeMountPathCACerts                = "/etc/ssl/certs"

	// tls related
	MSSQLInternalTLSCrt = "tls.crt"
	MSSQLInternalTLSKey = "tls.key"

	// =========================== PostgreSQL Constants ============================
	PostgresDatabasePortName          = "db"
	PostgresPrimaryServicePortName    = "primary"
	PostgresStandbyServicePortName    = "standby"
	PostgresDatabasePort              = 5432
	PostgresPodPrimary                = "primary"
	PostgresPodStandby                = "standby"
	EnvPostgresUser                   = "POSTGRES_USER"
	EnvPostgresPassword               = "POSTGRES_PASSWORD"
	PostgresRootUser                  = "postgres"
	PostgresCoordinatorContainerName  = "pg-coordinator"
	PostgresCoordinatorPort           = 2380
	PostgresCoordinatorPortName       = "coordinator"
	PostgresContainerName             = ResourceSingularPostgres
	PostgresInitContainerName         = "postgres-init-container"
	PostgresCoordinatorClientPort     = 2379
	PostgresCoordinatorClientPortName = "coordinatclient"

	RaftMetricsExporterPort     = 23790
	RaftMetricsExporterPortName = "raft-metrics"

	PostgresInitVolumeName           = "initial-script"
	PostgresInitDir                  = "/var/initdb"
	PostgresSharedMemoryVolumeName   = "shared-memory"
	PostgresSharedMemoryDir          = "/dev/shm"
	PostgresDataVolumeName           = "data"
	PostgresDataDir                  = "/var/pv"
	PostgresCustomConfigVolumeName   = "custom-config"
	PostgresCustomConfigDir          = "/etc/config"
	PostgresRunScriptsVolumeName     = "run-scripts"
	PostgresRunScriptsDir            = "/run_scripts"
	PostgresRoleScriptsVolumeName    = "role-scripts"
	PostgresRoleScriptsDir           = "/role_scripts"
	PostgresSharedScriptsVolumeName  = "scripts"
	PostgresSharedScriptsDir         = "/scripts"
	PostgresSharedTlsVolumeName      = "certs"
	PostgresSharedTlsVolumeMountPath = "/tls/certs"
	PostgresCustomConfigFile         = "user.conf"

	PostgresKeyFileSecretSuffix = "key"
	PostgresPEMSecretSuffix     = "pem"
	PostgresDefaultUsername     = "postgres"
	PostgresPgCoordinatorStatus = "Coordinator/Status"
	// to pause the failover for postgres. this is helpful for ops request
	PostgresPgCoordinatorStatusPause = "Pause"
	// to resume the failover for postgres. this is helpful for ops request
	PostgresPgCoordinatorStatusResume = "Resume"

	// when we need to resume pg-coordinator as non transferable we are going to set this state.
	// this is useful when we have set a node as primary and you don't want other node rather then this node to become primary.
	PostgresPgCoordinatorStatusResumeNonTransferable = "NonTransferableResume"

	// Multipliers for converting shared_buffers sizes.
	SharedBuffersGbAsByte = 1024 * 1024 * 1024
	SharedBuffersMbAsByte = 1024 * 1024

	SharedBuffersGbAsKiloByte = 1024 * 1024
	SharedBuffersMbAsKiloByte = 1024
	// NOTE(review): "IPS_LOCK" looks like a typo of the Linux capability
	// IPC_LOCK (the value is correct); the exported name is kept for
	// backward compatibility.
	IPS_LOCK                  = "IPC_LOCK"
	SYS_RESOURCE              = "SYS_RESOURCE"
	DropCapabilityALL         = "ALL"

	// =========================== ProxySQL Constants ============================
	LabelProxySQLName                  = ProxySQLKey + "/name"
	LabelProxySQLLoadBalance           = ProxySQLKey + "/load-balance"
	LabelProxySQLLoadBalanceStandalone = "Standalone"

	ProxySQLContainerName          = ResourceSingularProxySQL
	ProxySQLDatabasePort           = 6033
	ProxySQLDatabasePortName       = "db"
	ProxySQLPrimaryServicePortName = "db"
	ProxySQLAdminPort              = 6032
	ProxySQLAdminPortName          = "admin"
	ProxySQLDataMountPath          = "/var/lib/proxysql"
	ProxySQLCustomConfigMountPath  = "/etc/custom-config"

	ProxySQLBackendSSLMountPath  = "/var/lib/certs"
	ProxySQLFrontendSSLMountPath = "/var/lib/frontend"
	ProxySQLClusterAdmin         = "cluster"
	ProxySQLClusterPasswordField = "cluster_password"
	ProxySQLTLSConfigCustom      = "custom"
	ProxySQLTLSConfigSkipVerify  = "skip-verify"

	ProxySQLMonitorUsername = "proxysql"
	ProxySQLAuthUsername    = "cluster"
	ProxySQLConfigSecretKey = "proxysql.cnf"

	// =========================== Redis Constants ============================
	RedisConfigKey = "redis.conf" // RedisConfigKey is the key used for custom Redis configuration
	// DefaultConfigKey is going to create for the default redis configuration
	RedisContainerName             = ResourceSingularRedis
	RedisSentinelContainerName     = "redissentinel"
	DefaultConfigKey               = "default.conf"
	RedisShardKey                  = RedisKey + "/shard"
	RedisDatabasePortName          = "db"
	RedisPrimaryServicePortName    = "primary"
	RedisDatabasePort              = 6379
	RedisSentinelPort              = 26379
	RedisGossipPortName            = "gossip"
	RedisGossipPort                = 16379
	RedisSentinelPortName          = "sentinel"
	RedisInitContainerName         = "redis-init"
	RedisCoordinatorContainerName  = "rd-coordinator"
	RedisSentinelInitContainerName = "sentinel-init"

	RedisScriptVolumeName      = "script-vol"
	RedisScriptVolumePath      = "/scripts"
	RedisDataVolumeName        = "data"
	RedisDataVolumePath        = "/data"
	RedisTLSVolumeName         = "tls-volume"
	RedisExporterTLSVolumeName = "exporter-tls-volume"
	RedisTLSVolumePath         = "/certs"
	RedisSentinelTLSVolumeName = "sentinel-tls-volume"
	RedisSentinelTLSVolumePath = "/sentinel-certs"
	RedisConfigVolumeName      = "redis-config"
	RedisConfigVolumePath      = "/usr/local/etc/redis/"
	RedisInitVolumeName        = "init-volume"
	RedisInitVolumePath        = "/init"

	// Node flags reported by Redis cluster topology.
	RedisNodeFlagMaster = "master"
	RedisNodeFlagNoAddr = "noaddr"
	RedisNodeFlagSlave  = "slave"

	RedisKeyFileSecretSuffix = "key"
	RedisPEMSecretSuffix     = "pem"
	RedisRootUsername        = "default"

	EnvRedisUser              = "USERNAME"
	EnvRedisPassword          = "REDISCLI_AUTH"
	EnvRedisMode              = "REDIS_MODE"
	EnvRedisMajorRedisVersion = "MAJOR_REDIS_VERSION"

	// =========================== PgBouncer Constants ============================
	PgBouncerUpstreamServerCA               = "upstream-server-ca.crt"
	PgBouncerUpstreamServerClientCert       = "upstream-server-client.crt"
	PgBouncerUpstreamServerClientKey        = "upstream-server-client.key"
	PgBouncerClientCrt                      = "client.crt"
	PgBouncerClientKey                      = "client.key"
	PgBouncerCACrt                          = "ca.crt"
	PgBouncerTLSCrt                         = "tls.crt"
	PgBouncerTLSKey                         = "tls.key"
	PgBouncerDatabasePortName               = "db"
	PgBouncerPrimaryServicePortName         = "primary"
	PgBouncerDatabasePort                   = 5432
	PgBouncerConfigFile                     = "pgbouncer.ini"
	PgBouncerAdminUsername                  = "pgbouncer"
	PgBouncerDefaultPoolMode                = "session"
	PgBouncerDefaultIgnoreStartupParameters = "empty"
	BackendSecretResourceVersion            = "backend-secret-resource-version"

	// =========================== Pgpool Constants ============================
	EnvPostgresUsername                = "POSTGRES_USERNAME"
	EnvPgpoolPcpUser                   = "PGPOOL_PCP_USER"
	EnvPgpoolPcpPassword               = "PGPOOL_PCP_PASSWORD"
	EnvPgpoolPasswordEncryptionMethod  = "PGPOOL_PASSWORD_ENCRYPTION_METHOD"
	EnvEnablePoolPasswd                = "PGPOOL_ENABLE_POOL_PASSWD"
	EnvSkipPasswdEncryption            = "PGPOOL_SKIP_PASSWORD_ENCRYPTION"
	PgpoolConfigSecretMountPath        = "/config"
	PgpoolConfigVolumeName             = "pgpool-config"
	PgpoolContainerName                = "pgpool"
	PgpoolDefaultServicePort           = 9999
	PgpoolMonitoringDefaultServicePort = 9719
	PgpoolPcpPort                      = 9595
	PgpoolExporterDatabase             = "postgres"
	EnvPgpoolExporterDatabase          = "POSTGRES_DATABASE"
	EnvPgpoolService                   = "PGPOOL_SERVICE"
	EnvPgpoolServicePort               = "PGPOOL_SERVICE_PORT"
	EnvPgpoolSSLMode                   = "SSLMODE"
	EnvPgpoolExporterConnectionString  = "DATA_SOURCE_NAME"
	PgpoolDefaultSSLMode               = "disable"
	PgpoolExporterContainerName        = "exporter"
	PgpoolAuthUsername                 = "pcp"
	SyncPeriod                         = 10
	PgpoolTlsVolumeName                = "certs"
	PgpoolTlsVolumeMountPath           = "/config/tls"
	PgpoolExporterTlsVolumeName        = "exporter-certs"
	PgpoolExporterTlsVolumeMountPath   = "/tls/certs"
	PgpoolRootUser                     = "postgres"
	PgpoolPrimaryServicePortName       = "primary"
	PgpoolDatabasePortName             = "db"
	PgpoolPcpPortName                  = "pcp"
	PgpoolCustomConfigFile             = "pgpool.conf"

	// =========================== ZooKeeper Constants ============================
	KubeDBZooKeeperRoleName         = "kubedb:zookeeper-version-reader"
	KubeDBZooKeeperRoleBindingName  = "kubedb:zookeeper-version-reader"
	ZooKeeperClientPortName         = "client"
	ZooKeeperClientPort             = 2181
	ZooKeeperQuorumPortName         = "quorum"
	ZooKeeperQuorumPort             = 2888
	ZooKeeperLeaderElectionPortName = "leader-election"
	ZooKeeperLeaderElectionPort     = 3888
	ZooKeeperMetricsPortName        = "metrics"
	ZooKeeperMetricsPort            = 7000
	ZooKeeperAdminServerPortName    = "admin-server"
	ZooKeeperAdminServerPort        = 8080
	ZooKeeperNode                   = "/kubedb_health_checker_node"
	ZooKeeperData                   = "kubedb_health_checker_data"
	ZooKeeperConfigVolumeName       = "zookeeper-config"
	ZooKeeperConfigVolumePath       = "/conf"
	ZooKeeperDataVolumeName         = "data"
	ZooKeeperDataVolumePath         = "/data"
	ZooKeeperScriptVolumeName       = "script-vol"
	ZooKeeperScriptVolumePath       = "/scripts"
	ZooKeeperContainerName          = ResourceSingularZooKeeper
	ZooKeeperInitContainerName      = ResourceSingularZooKeeper + "-init"

	ZooKeeperConfigFileName               = "zoo.cfg"
	ZooKeeperLog4jPropertiesFileName      = "log4j.properties"
	ZooKeeperLog4jQuietPropertiesFileName = "log4j-quiet.properties"

	EnvZooKeeperDomain          = "DOMAIN"
	EnvZooKeeperQuorumPort      = "QUORUM_PORT"
	EnvZooKeeperLeaderPort      = "LEADER_PORT"
	EnvZooKeeperClientHost      = "CLIENT_HOST"
	EnvZooKeeperClientPort      = "CLIENT_PORT"
	EnvZooKeeperAdminServerHost = "ADMIN_SERVER_HOST"
	EnvZooKeeperAdminServerPort = "ADMIN_SERVER_PORT"
	EnvZooKeeperClusterName     = "CLUSTER_NAME"
	EnvZooKeeperClusterSize     = "CLUSTER_SIZE"
	EnvZooKeeperUser            = "ZK_USER"
	EnvZooKeeperPassword        = "ZK_PASSWORD"
	EnvZooKeeperJaasFilePath    = "ZK_JAAS_FILE_PATH"
	EnvZooKeeperJVMFLags        = "JVMFLAGS"

	ZooKeeperSuperUsername       = "super"
	ZooKeeperSASLAuthLoginConfig = "-Djava.security.auth.login.config"
	ZooKeeperJaasFilePath        = "/data/jaas.conf"
)
View Source
// Condition types and condition reasons reported on KubeDB database objects.
const (
	// used for Databases that have started provisioning
	DatabaseProvisioningStarted = "ProvisioningStarted"
	// used for Databases which completed provisioning
	DatabaseProvisioned = "Provisioned"
	// used for Databases that are currently being initialized using stash
	DatabaseDataRestoreStarted = "DataRestoreStarted"
	// used for Databases that have been initialized using stash
	DatabaseDataRestored = "DataRestored"
	// used for Databases whose pods are ready
	DatabaseReplicaReady = "ReplicaReady"
	// used for Databases that are currently accepting connection
	DatabaseAcceptingConnection = "AcceptingConnection"
	// used for Databases that report status OK (also implies that we can connect to it)
	DatabaseReady = "Ready"
	// used for database that reports ok when all the instances are available
	ServerReady = "ServerReady"
	// used for Databases that are paused
	DatabasePaused = "Paused"
	// used for Databases that are halted
	DatabaseHalted = "Halted"
	// used for pausing health check of a Database
	DatabaseHealthCheckPaused = "HealthCheckPaused"
	// used for Databases whose internal user credentials are synced
	InternalUsersSynced = "InternalUsersSynced"
	// used for databases that have read access
	DatabaseReadAccess = "DatabaseReadAccess"
	// used for databases that have write access
	DatabaseWriteAccess = "DatabaseWriteAccess"

	// Condition reasons
	DataRestoreStartedByExternalInitializer    = "DataRestoreStartedByExternalInitializer"
	DataRestoreInterrupted                     = "DataRestoreInterrupted"
	DatabaseSuccessfullyRestored               = "SuccessfullyDataRestored"
	FailedToRestoreData                        = "FailedToRestoreData"
	AllReplicasAreReady                        = "AllReplicasReady"
	SomeReplicasAreNotReady                    = "SomeReplicasNotReady"
	DatabaseAcceptingConnectionRequest         = "DatabaseAcceptingConnectionRequest"
	DatabaseNotAcceptingConnectionRequest      = "DatabaseNotAcceptingConnectionRequest"
	ReadinessCheckSucceeded                    = "ReadinessCheckSucceeded"
	ReadinessCheckFailed                       = "ReadinessCheckFailed"
	DatabaseProvisioningStartedSuccessfully    = "DatabaseProvisioningStartedSuccessfully"
	DatabaseSuccessfullyProvisioned            = "DatabaseSuccessfullyProvisioned"
	DatabaseHaltedSuccessfully                 = "DatabaseHaltedSuccessfully"
	DatabaseReadAccessCheckSucceeded           = "DatabaseReadAccessCheckSucceeded"
	DatabaseWriteAccessCheckSucceeded          = "DatabaseWriteAccessCheckSucceeded"
	DatabaseReadAccessCheckFailed              = "DatabaseReadAccessCheckFailed"
	DatabaseWriteAccessCheckFailed             = "DatabaseWriteAccessCheckFailed"
	InternalUsersCredentialSyncFailed          = "InternalUsersCredentialsSyncFailed"
	InternalUsersCredentialsSyncedSuccessfully = "InternalUsersCredentialsSyncedSuccessfully"
)

List of possible condition types for a KubeDB object

View Source
// Kafka and Solr constants used by the KubeDB operator: listener/port names
// and numbers, KRaft node roles, config directories and file names, server
// property keys, SSL/SASL settings, and Cruise Control tuning keys.
const (
	// Listener/port names and default port numbers for brokers, controllers,
	// Cruise Control, and the health-check topic.
	KafkaPortNameREST                  = "http"
	KafkaPortNameController            = "controller"
	KafkaPortNameCruiseControlListener = "cc-listener"
	KafkaPortNameCruiseControlREST     = "cc-rest"
	KafkaBrokerClientPortName          = "broker"
	KafkaControllerClientPortName      = "controller"
	KafkaPortNameLocal                 = "local"
	KafkaTopicNameHealth               = "kafka-health"
	KafkaTopicDeletionThresholdOffset  = 1000
	KafkaBrokerMaxID                   = 1000
	KafkaRESTPort                      = 9092
	KafkaControllerRESTPort            = 9093
	KafkaLocalRESTPort                 = 29092
	KafkaCruiseControlRESTPort         = 9090
	KafkaCruiseControlListenerPort     = 9094
	KafkaCCDefaultInNetwork            = 500000
	KafkaCCDefaultOutNetwork           = 500000

	// Container, default admin user, and KRaft node-role identifiers.
	KafkaContainerName          = "kafka"
	KafkaUserAdmin              = "admin"
	KafkaNodeRoleSet            = "set"
	KafkaNodeRolesCombined      = "controller,broker"
	KafkaNodeRolesController    = "controller"
	KafkaNodeRolesBrokers       = "broker"
	KafkaNodeRolesCruiseControl = "cruise-control"
	KafkaStandbyServiceSuffix   = "standby"

	KafkaBrokerListener     = "KafkaBrokerListener"
	KafkaControllerListener = "KafkaControllerListener"

	// Data/config directory paths inside the Kafka and Cruise Control
	// containers, and the config file names mounted there.
	KafkaDataDir                              = "/var/log/kafka"
	KafkaMetaDataDir                          = "/var/log/kafka/metadata"
	KafkaCertDir                              = "/var/private/ssl"
	KafkaConfigDir                            = "/opt/kafka/config/kafkaconfig"
	KafkaTempConfigDir                        = "/opt/kafka/config/temp-config"
	KafkaCustomConfigDir                      = "/opt/kafka/config/custom-config"
	KafkaCCTempConfigDir                      = "/opt/cruise-control/temp-config"
	KafkaCCCustomConfigDir                    = "/opt/cruise-control/custom-config"
	KafkaCapacityConfigPath                   = "config/capacity.json"
	KafkaConfigFileName                       = "config.properties"
	KafkaServerCustomConfigFileName           = "server.properties"
	KafkaBrokerCustomConfigFileName           = "broker.properties"
	KafkaControllerCustomConfigFileName       = "controller.properties"
	KafkaSSLPropertiesFileName                = "ssl.properties"
	KafkaClientAuthConfigFileName             = "clientauth.properties"
	KafkaCruiseControlConfigFileName          = "cruisecontrol.properties"
	KafkaCruiseControlCapacityConfigFileName  = "capacity.json"
	KafkaCruiseControlBrokerSetConfigFileName = "brokerSets.json"
	KafkaCruiseControlClusterConfigFileName   = "clusterConfigs.json"
	KafkaCruiseControlLog4jConfigFileName     = "log4j.properties"
	KafkaCruiseControlUIConfigFileName        = "config.csv"

	// Kafka server property keys written into the generated config files.
	KafkaListeners                         = "listeners"
	KafkaAdvertisedListeners               = "advertised.listeners"
	KafkaBootstrapServers                  = "bootstrap.servers"
	KafkaListenerSecurityProtocolMap       = "listener.security.protocol.map"
	KafkaControllerNodeCount               = "controller.count"
	KafkaControllerQuorumVoters            = "controller.quorum.voters"
	KafkaControllerListenersName           = "controller.listener.names"
	KafkaInterBrokerListener               = "inter.broker.listener.name"
	KafkaNodeRole                          = "process.roles"
	KafkaClusterID                         = "cluster.id"
	KafkaClientID                          = "client.id"
	KafkaDataDirName                       = "log.dirs"
	KafkaMetadataDirName                   = "metadata.log.dir"
	KafkaKeystorePasswordKey               = "keystore_password"
	KafkaTruststorePasswordKey             = "truststore_password"
	KafkaServerKeystoreKey                 = "server.keystore.jks"
	KafkaServerTruststoreKey               = "server.truststore.jks"
	KafkaSecurityProtocol                  = "security.protocol"
	KafkaGracefulShutdownTimeout           = "task.shutdown.graceful.timeout.ms"
	KafkaTopicConfigProviderClass          = "topic.config.provider.class"
	KafkaCapacityConfigFile                = "capacity.config.file"
	KafkaTwoStepVerification               = "two.step.verification.enabled"
	KafkaBrokerFailureDetection            = "kafka.broker.failure.detection.enable"
	KafkaMetricSamplingInterval            = "metric.sampling.interval.ms"
	KafkaPartitionMetricsWindow            = "partition.metrics.window.ms"
	KafkaPartitionMetricsWindowNum         = "num.partition.metrics.windows"
	KafkaSampleStoreTopicReplicationFactor = "sample.store.topic.replication.factor"

	// SSL/TLS property keys and defaults.
	KafkaEndpointVerifyAlgo  = "ssl.endpoint.identification.algorithm"
	KafkaKeystoreLocation    = "ssl.keystore.location"
	KafkaTruststoreLocation  = "ssl.truststore.location"
	KafkaKeystorePassword    = "ssl.keystore.password"
	KafkaTruststorePassword  = "ssl.truststore.password"
	KafkaKeyPassword         = "ssl.key.password"
	KafkaTruststoreType      = "ssl.truststore.type"
	KafkaKeystoreType        = "ssl.keystore.type"
	KafkaTruststoreTypeJKS   = "JKS"
	KafkaKeystoreDefaultPass = "changeit"

	KafkaMetricReporters       = "metric.reporters"
	KafkaAutoCreateTopicEnable = "auto.create.topics.enable"

	// SASL authentication property keys.
	KafkaEnabledSASLMechanisms       = "sasl.enabled.mechanisms"
	KafkaSASLMechanism               = "sasl.mechanism"
	KafkaMechanismControllerProtocol = "sasl.mechanism.controller.protocol"
	KafkaSASLInterBrokerProtocol     = "sasl.mechanism.inter.broker.protocol"
	KafkaSASLPLAINConfigKey          = "listener.name.SASL_PLAINTEXT.plain.sasl.jaas.config"
	KafkaSASLSSLConfigKey            = "listener.name.SASL_SSL.plain.sasl.jaas.config"
	KafkaSASLJAASConfig              = "sasl.jaas.config"
	KafkaServiceName                 = "serviceName"
	KafkaSASLPlainMechanism          = "PLAIN"

	// Cruise Control property keys. NOTE(review): KafkaCCCapacityConfig,
	// KafkaCCTwoStepVerificationEnabled, and KafkaCCBrokerFailureDetectionEnabled
	// duplicate the values of KafkaCapacityConfigFile, KafkaTwoStepVerification,
	// and KafkaBrokerFailureDetection above; both spellings are exported and
	// kept for backward compatibility.
	KafkaCCMetricSamplerClass            = "metric.sampler.class"
	KafkaCCCapacityConfig                = "capacity.config.file"
	KafkaCCTwoStepVerificationEnabled    = "two.step.verification.enabled"
	KafkaCCBrokerFailureDetectionEnabled = "kafka.broker.failure.detection.enable"
	KafkaOffSetTopicReplica              = "offsets.topic.replication.factor"
	KafkaTransactionStateLogReplica      = "transaction.state.log.replication.factor"
	// NOTE(review): "Sate" is a typo for "State"; the name is exported and
	// kept as-is for backward compatibility.
	KafkaTransactionSateLogMinISR = "transaction.state.log.min.isr"
	KafkaLogCleanerMinLagSec      = "log.cleaner.min.compaction.lag.ms"
	KafkaLogCleanerBackoffMS      = "log.cleaner.backoff.ms"

	// Cruise Control metrics-reporter property keys.
	KafkaCCKubernetesMode                 = "cruise.control.metrics.reporter.kubernetes.mode"
	KafkaCCBootstrapServers               = "cruise.control.metrics.reporter.bootstrap.servers"
	KafkaCCMetricTopicAutoCreate          = "cruise.control.metrics.topic.auto.create"
	KafkaCCMetricTopicNumPartition        = "cruise.control.metrics.topic.num.partitions"
	KafkaCCMetricTopicReplica             = "cruise.control.metrics.topic.replication.factor"
	KafkaCCMetricReporterSecurityProtocol = "cruise.control.metrics.reporter.security.protocol"
	KafkaCCMetricReporterSaslMechanism    = "cruise.control.metrics.reporter.sasl.mechanism"
	KafkaCCSampleLoadingThreadsNum        = "num.sample.loading.threads"
	KafkaCCMinSamplesPerBrokerWindow      = "min.samples.per.broker.metrics.window"

	// Volume names and environment variables for Kafka pods.
	KafkaVolumeData         = "data"
	KafkaVolumeConfig       = "kafkaconfig"
	KafkaVolumeTempConfig   = "temp-config"
	KafkaVolumeCustomConfig = "custom-config"

	EnvKafkaUser     = "KAFKA_USER"
	EnvKafkaPassword = "KAFKA_PASSWORD"

	// Listener security protocols.
	KafkaListenerPLAINTEXTProtocol = "PLAINTEXT"
	KafkaListenerSASLProtocol      = "SASL_PLAINTEXT"
	KafkaListenerSASLSSLProtocol   = "SASL_SSL"

	// Fully-qualified Java class names plugged into Kafka/Cruise Control config.
	KafkaCCMetricsSampler         = "com.linkedin.kafka.cruisecontrol.monitor.sampling.CruiseControlMetricsReporterSampler"
	KafkaAdminTopicConfigProvider = "com.linkedin.kafka.cruisecontrol.config.KafkaAdminTopicConfigProvider"
	KafkaCCMetricReporter         = "com.linkedin.kafka.cruisecontrol.metricsreporter.CruiseControlMetricsReporter"
	KafkaJMXMetricReporter        = "org.apache.kafka.common.metrics.JmxReporter"

	// =========================== Solr Constants ============================
	ResourceCodeSolr      = "sl"
	ResourceKindSolr      = "Solr"
	ResourceSingularSolr  = "solr"
	ResourcePluralSolr    = "solrs"
	SolrPortName          = "http"
	SolrRestPort          = 8983
	SolrExporterPort      = 9854
	SolrSecretKey         = "solr.xml"
	SolrContainerName     = "solr"
	SolrInitContainerName = "init-solr"
	SolrAdmin             = "admin"
	SecurityJSON          = "security.json"
	SolrZkDigest          = "zk-digest"
	SolrZkReadonlyDigest  = "zk-digest-readonly"

	// Solr volume names.
	SolrVolumeDefaultConfig = "default-config"
	SolrVolumeCustomConfig  = "custom-config"
	SolrVolumeAuthConfig    = "auth-config"
	SolrVolumeData          = "data"
	SolrVolumeConfig        = "slconfig"

	// Solr directory layout inside the container.
	DistLibs              = "/opt/solr/dist"
	ContribLibs           = "/opt/solr/contrib/%s/lib"
	SysPropLibPlaceholder = "${solr.sharedLib:}"
	SolrHomeDir           = "/var/solr"
	SolrDataDir           = "/var/solr/data"
	SolrTempConfigDir     = "/temp-config"
	SolrCustomConfigDir   = "/custom-config"
	SolrSecurityConfigDir = "/var/security"

	// solr.xml <solrcloud> section keys and their default values.
	SolrCloudHostKey                       = "host"
	SolrCloudHostValue                     = ""
	SolrCloudHostPortKey                   = "hostPort"
	SolrCloudHostPortValue                 = 80
	SolrCloudHostContextKey                = "hostContext"
	SolrCloudHostContextValue              = "solr"
	SolrCloudGenericCoreNodeNamesKey       = "genericCoreNodeNames"
	SolrCloudGenericCoreNodeNamesValue     = true
	SolrCloudZKClientTimeoutKey            = "zkClientTimeout"
	SolrCloudZKClientTimeoutValue          = 30000
	SolrCloudDistribUpdateSoTimeoutKey     = "distribUpdateSoTimeout"
	SolrCloudDistribUpdateSoTimeoutValue   = 600000
	SolrCloudDistribUpdateConnTimeoutKey   = "distribUpdateConnTimeout"
	SolrCloudDistribUpdateConnTimeoutValue = 60000
	SolrCloudZKCredentialProviderKey       = "zkCredentialsProvider"
	SolrCloudZKCredentialProviderValue     = "org.apache.solr.common.cloud.DigestZkCredentialsProvider"
	SolrCloudZKAclProviderKey              = "zkACLProvider"
	SolrCloudZKAclProviderValue            = "org.apache.solr.common.cloud.DigestZkACLProvider"
	SolrCloudZKCredentialsInjectorKey      = "zkCredentialsInjector"
	SolrCloudZKCredentialsInjectorValue    = "org.apache.solr.common.cloud.VMParamsZkCredentialsInjector"

	// solr.xml <shardHandlerFactory> keys and defaults.
	ShardHandlerFactorySocketTimeoutKey   = "socketTimeout"
	ShardHandlerFactorySocketTimeoutValue = 600000
	ShardHandlerFactoryConnTimeoutKey     = "connTimeout"
	ShardHandlerFactoryConnTimeoutValue   = 60000

	// solr.xml top-level keys mapped to their system-property names.
	SolrKeysMaxBooleanClausesKey   = "maxBooleanClauses"
	SolrKeysMaxBooleanClausesValue = "solr.max.booleanClauses"
	SolrKeysSharedLibKey           = "sharedLib"
	SolrKeysShardLibValue          = "solr.sharedLib"
	SolrKeysHostPortKey            = "hostPort"
	SolrKeysHostPortValue          = "solr.port.advertise"
	SolrKeysAllowPathsKey          = "allowPaths"
	SolrKeysAllowPathsValue        = "solr.allowPaths"

	// Default values for generated solr.xml configuration.
	SolrConfMaxBooleanClausesKey   = "maxBooleanClauses"
	SolrConfMaxBooleanClausesValue = 1024
	SolrConfAllowPathsKey          = "allowPaths"
	SolrConfAllowPathsValue        = ""
	SolrConfSolrCloudKey           = "solrcloud"
	SolrConfShardHandlerFactoryKey = "shardHandlerFactory"
)
View Source
// Druid constants: config directories, volume names, per-node-type config
// files, ports, and runtime-property keys used to render Druid cluster
// configuration.
const (
	// Config directories inside the Druid container, one per node type.
	// NOTE(review): the doubled "C" in DruidCConfigDirMySQLMetadata appears to
	// be a typo, but the name is exported and kept for backward compatibility.
	DruidConfigDirCommon              = "/opt/druid/conf/druid/cluster/_common"
	DruidConfigDirCoordinatorOverlord = "/opt/druid/conf/druid/cluster/master/coordinator-overlord"
	DruidConfigDirHistoricals         = "/opt/druid/conf/druid/cluster/data/historical"
	DruidConfigDirMiddleManagers      = "/opt/druid/conf/druid/cluster/data/middleManager"
	DruidConfigDirBrokers             = "/opt/druid/conf/druid/cluster/query/broker"
	DruidConfigDirRouters             = "/opt/druid/conf/druid/cluster/query/router"
	DruidCConfigDirMySQLMetadata      = "/opt/druid/extensions/mysql-metadata-storage"

	// Volume names and their mount paths for operator/main/custom config.
	DruidVolumeOperatorConfig = "operator-config-volume"
	DruidVolumeMainConfig     = "main-config-volume"
	DruidVolumeCustomConfig   = "custom-config"

	DruidOperatorConfigDir = "/tmp/config/operator-config"
	DruidMainConfigDir     = "/opt/druid/conf"
	DruidCustomConfigDir   = "/tmp/config/custom-config"

	// Per-node-type configuration file names.
	DruidVolumeCommonConfig          = "common-config-volume"
	DruidCommonConfigFile            = "common.runtime.properties"
	DruidCoordinatorsJVMConfigFile   = "coordinators.jvm.config"
	DruidHistoricalsJVMConfigFile    = "historicals.jvm.config"
	DruidBrokersJVMConfigFile        = "brokers.jvm.config"
	DruidMiddleManagersJVMConfigFile = "middleManagers.jvm.config"
	DruidRoutersJVMConfigFile        = "routers.jvm.config"
	DruidCoordinatorsConfigFile      = "coordinators.properties"
	DruidHistoricalsConfigFile       = "historicals.properties"
	DruidMiddleManagersConfigFile    = "middleManagers.properties"
	DruidBrokersConfigFile           = "brokers.properties"
	DruidRoutersConfigFile           = "routers.properties"
	DruidVolumeMySQLMetadataStorage  = "mysql-metadata-storage"

	// Container names and default admin user.
	DruidContainerName     = "druid"
	DruidInitContainerName = "init-druid"
	DruidUserAdmin         = "admin"

	// Environment variable names injected into Druid pods.
	// NOTE(review): "Metdata" is a typo for "Metadata"; the name is exported
	// and kept for backward compatibility (the env var value is spelled
	// correctly).
	EnvDruidAdminPassword          = "DRUID_ADMIN_PASSWORD"
	EnvDruidMetdataStoragePassword = "DRUID_METADATA_STORAGE_PASSWORD"
	EnvDruidZKServicePassword      = "DRUID_ZK_SERVICE_PASSWORD"
	EnvDruidCoordinatorAsOverlord  = "DRUID_COORDINATOR_AS_OVERLORD"

	// Default ports per node type, plus the Prometheus exporter port.
	DruidPortCoordinators   = 8081
	DruidPortOverlords      = 8090
	DruidPortHistoricals    = 8083
	DruidPortMiddleManagers = 8091
	DruidPortBrokers        = 8082
	DruidPortRouters        = 8888
	DruidExporterPort       = 9104

	// Common Runtime Configurations Properties
	// ZooKeeperSpec
	DruidZKServiceHost              = "druid.zk.service.host"
	DruidZKPathsBase                = "druid.zk.paths.base"
	DruidZKServiceCompress          = "druid.zk.service.compress"
	DruidZKServiceUserKey           = "druid.zk.service.user"
	DruidZKServicePasswordKey       = "druid.zk.service.pwd"
	DruidZKServicePasswordEnvConfig = "{\"type\": \"environment\", \"variable\": \"DRUID_ZK_SERVICE_PASSWORD\"}"

	// Metadata Storage
	DruidMetadataStorageTypeKey                    = "druid.metadata.storage.type"
	DruidMetadataStorageConnectorConnectURI        = "druid.metadata.storage.connector.connectURI"
	DruidMetadataStorageConnectURIPrefixMySQL      = "jdbc:mysql://"
	DruidMetadataStorageConnectURIPrefixPostgreSQL = "jdbc:postgresql://"
	DruidMetadataStorageConnectorUser              = "druid.metadata.storage.connector.user"
	DruidMetadataStorageConnectorPassword          = "druid.metadata.storage.connector.password"
	DruidMetadataStorageConnectorPasswordEnvConfig = "{\"type\": \"environment\", \"variable\": \"DRUID_METADATA_STORAGE_PASSWORD\"}"
	DruidMetadataStorageCreateTables               = "druid.metadata.storage.connector.createTables"

	// Deep Storage
	DruidDeepStorageTypeKey      = "druid.storage.type"
	DruidDeepStorageTypeS3       = "s3"
	DruidDeepStorageBaseKey      = "druid.storage.baseKey"
	DruidDeepStorageBucket       = "druid.storage.bucket"
	DruidS3AccessKey             = "druid.s3.accessKey"
	DruidS3SecretKey             = "druid.s3.secretKey"
	DruidS3EndpointSigningRegion = "druid.s3.endpoint.signingRegion"
	DruidS3EnablePathStyleAccess = "druid.s3.enablePathStyleAccess"
	DruidS3EndpointURL           = "druid.s3.endpoint.url"

	// Indexing service logs
	DruidIndexerLogsType           = "druid.indexer.logs.type"
	DruidIndexerLogsS3Bucket       = "druid.indexer.logs.s3Bucket"
	DruidIndexerLogsS3Prefix       = "druid.indexer.logs.s3Prefix"
	DruidEnableLookupSyncOnStartup = "druid.lookup.enableLookupSyncOnStartup"

	// Authentication
	DruidAuthAuthenticationChain                             = "druid.auth.authenticatorChain"
	DruidAuthAuthenticationChainValueBasic                   = "[\"basic\"]"
	DruidAuthAuthenticatorBasicType                          = "druid.auth.authenticator.basic.type"
	DruidAuthAuthenticatorBasicTypeValue                     = "basic"
	DruidAuthAuthenticatorBasicInitialAdminPassword          = "druid.auth.authenticator.basic.initialAdminPassword"
	DruidAuthAuthenticatorBasicInitialAdminPasswordEnvConfig = "{\"type\": \"environment\", \"variable\": \"DRUID_ADMIN_PASSWORD\"}"
	DruidAuthAuthenticatorBasicInitialInternalClientPassword = "druid.auth.authenticator.basic.initialInternalClientPassword"
	DruidAuthAuthenticatorBasicCredentialsValidatorType      = "druid.auth.authenticator.basic.credentialsValidator.type"
	DruidAuthAuthenticatorBasicSkipOnFailure                 = "druid.auth.authenticator.basic.skipOnFailure"
	DruidAuthAuthenticatorBasicAuthorizerName                = "druid.auth.authenticator.basic.authorizerName"

	// Escalator
	DruidAuthEscalatorType                   = "druid.escalator.type"
	DruidAuthEscalatorInternalClientUsername = "druid.escalator.internalClientUsername"
	DruidAuthEscalatorInternalClientPassword = "druid.escalator.internalClientPassword"
	DruidAuthEscalatorAuthorizerName         = "druid.escalator.authorizerName"
	DruidAuthAuthorizers                     = "druid.auth.authorizers"
	DruidAuthAuthorizerBasicType             = "druid.auth.authorizer.basic.type"

	// Extension Load List
	DruidExtensionLoadListKey               = "druid.extensions.loadList"
	DruidExtensionLoadList                  = "" /* 217-byte string literal not displayed */
	DruidExtensionAvro                      = "druid-avro-extensions"
	DruidExtensionS3                        = "druid-s3-extensions"
	DruidExtensionHDFS                      = "druid-hdfs-storage"
	DruidExtensionGoogle                    = "druid-google-extensions"
	DruidExtensionAzure                     = "druid-azure-extensions"
	DruidExtensionKafkaIndexingService      = "druid-kafka-indexing-service"
	DruidExtensionDataSketches              = "druid-datasketches"
	DruidExtensionKubernetes                = "druid-kubernetes-extensions"
	DruidExtensionMySQLMetadataStorage      = "mysql-metadata-storage"
	DruidExtensionPostgreSQLMetadataStorage = "postgresql-metadata-storage"
	DruidExtensionBasicSecurity             = "druid-basic-security"
	DruidExtensionMultiStageQuery           = "druid-multi-stage-query"
	DruidExtensionPrometheusEmitter         = "prometheus-emitter"
	DruidService                            = "druid.service"

	// Monitoring Configurations
	DruidEmitter                                = "druid.emitter"
	DruidEmitterPrometheus                      = "prometheus"
	DruidEmitterPrometheusPortKey               = "druid.emitter.prometheus.port"
	DruidEmitterPrometheusPortVal               = 9104
	DruidMonitoringMonitorsKey                  = "druid.monitoring.monitors"
	DruidEmitterPrometheusDimensionMapPath      = "druid.emitter.prometheus.dimensionMapPath"
	DruidEmitterPrometheusStrategy              = "druid.emitter.prometheus.strategy"
	DruidMetricsJVMMonitor                      = "org.apache.druid.java.util.metrics.JvmMonitor"
	DruidMetricsServiceStatusMonitor            = "org.apache.druid.server.metrics.ServiceStatusMonitor"
	DruidMetricsQueryCountStatsMonitor          = "org.apache.druid.server.metrics.QueryCountStatsMonitor"
	DruidMonitoringHistoricalMetricsMonitor     = "org.apache.druid.server.metrics.HistoricalMetricsMonitor"
	DruidMonitoringSegmentsStatsMonitor         = "org.apache.druid.server.metrics.SegmentStatsMonitor"
	DruidMonitoringWorkerTaskCountsStatsMonitor = "org.apache.druid.server.metrics.WorkerTaskCountStatsMonitor"
	DruidMonitoringQueryCountStatsMonitor       = "org.apache.druid.server.metrics.QueryCountStatsMonitor"
	DruidMonitoringTaskCountStatsMonitor        = "org.apache.druid.server.metrics.TaskCountStatsMonitor"
	DruidMonitoringSysMonitor                   = "org.apache.druid.java.util.metrics.SysMonitor"

	/// Coordinators Configurations
	DruidCoordinatorStartDelay                = "druid.coordinator.startDelay"
	DruidCoordinatorPeriod                    = "druid.coordinator.period"
	DruidIndexerQueueStartDelay               = "druid.indexer.queue.startDelay"
	DruidManagerSegmentsPollDuration          = "druid.manager.segments.pollDuration"
	DruidCoordinatorKillAuditLogOn            = "druid.coordinator.kill.audit.on"
	DruidMillisToWaitBeforeDeleting           = "millisToWaitBeforeDeleting"
	DruidCoordinatorAsOverlord                = "druid.coordinator.asOverlord.enabled"
	DruidCoordinatorAsOverlordOverlordService = "druid.coordinator.asOverlord.overlordService"

	/// Overlords Configurations
	DruidServiceNameOverlords            = "druid/overlord"
	DruidIndexerStorageType              = "druid.indexer.storage.type"
	DruidIndexerAuditLogEnabled          = "druid.indexer.auditLog.enabled"
	DruidIndexerLogsKillEnables          = "druid.indexer.logs.kill.enabled"
	DruidIndexerLogsKillDurationToRetain = "druid.indexer.logs.kill.durationToRetain"
	DruidIndexerLogsKillInitialDelay     = "druid.indexer.logs.kill.initialDelay"
	DruidIndexerLogsKillDelay            = "druid.indexer.logs.kill.delay"

	DruidEmitterLoggingLogLevel = "druid.emitter.logging.logLevel"

	/// Historicals Configurations
	// Properties
	DruidProcessingNumOfThreads = "druid.processing.numThreads"

	// Segment Cache
	DruidHistoricalsSegmentCacheLocations              = "druid.segmentCache.locations"
	DruidHistoricalsSegmentCacheDropSegmentDelayMillis = "druid.segmentCache.dropSegmentDelayMillis"
	DruidHistoricalsSegmentCacheDir                    = "/druid/data/segments"
	DruidVolumeHistoricalsSegmentCache                 = "segment-cache"

	// Query Cache
	DruidHistoricalCacheUseCache      = "druid.historical.cache.useCache"
	DruidHistoricalCachePopulateCache = "druid.historical.cache.populateCache"
	DruidCacheSizeInBytes             = "druid.cache.sizeInBytes"

	// Values
	DruidSegmentCacheLocationsDefaultValue = "[{\"path\":\"/druid/data/segments\",\"maxSize\":10737418240}]"

	/// MiddleManagers Configurations
	// Properties
	// NOTE(review): DruidMiddleManagersVolumeBaseTaskDir and
	// DruidVolumeMiddleManagersBaseTaskDir share the same value; both names
	// are exported and kept for backward compatibility.
	DruidWorkerCapacity                                    = "druid.worker.capacity"
	DruidIndexerTaskBaseTaskDir                            = "druid.indexer.task.baseTaskDir"
	DruidWorkerTaskBaseTaskDirKey                          = "druid.worker.task.baseTaskDir"
	DruidWorkerTaskBaseTaskDir                             = "/var/druid/task"
	DruidWorkerBaseTaskDirSize                             = "druid.worker.baseTaskDirSize"
	DruidIndexerForkPropertyDruidProcessingBufferSizeBytes = "druid.indexer.fork.property.druid.processing.buffer.sizeBytes"
	DruidMiddleManagersVolumeBaseTaskDir                   = "base-task-dir"
	DruidVolumeMiddleManagersBaseTaskDir                   = "base-task-dir"

	// Values
	DruidIndexerTaskBaseTaskDirValue = "/druid/data/baseTaskDir"

	/// Brokers Configurations
	DruidBrokerHTTPNumOfConnections = "druid.broker.http.numConnections"
	DruidSQLEnable                  = "druid.sql.enable"

	/// Routers Configurations
	DruidRouterHTTPNumOfConnections = "druid.router.http.numConnections"
	DruidRouterHTTPNumOfMaxThreads  = "druid.router.http.numMaxThreads"

	// Common Nodes Configurations
	// Properties
	DruidPlaintextPort               = "druid.plaintextPort"
	DruidProcessingBufferSizeBytes   = "druid.processing.buffer.sizeBytes"
	DruidProcessingNumOfMergeBuffers = "druid.processing.numMergeBuffers"
	DruidServerHTTPNumOfThreads      = "druid.server.http.numThreads"

	// Health Check
	DruidHealthDataZero = "0"
	DruidHealthDataOne  = "1"
)

=========================== Druid Constants ============================

View Source
// RabbitMQ constants: ports, volume names, directory layout, plugin names,
// and rabbitmq.conf keys/values used to provision RabbitMQ clusters.
const (
	// Default RabbitMQ ports (AMQP, peer discovery, management UI, exporter,
	// inter-node communication).
	RabbitMQAMQPPort          = 5672
	RabbitMQPeerDiscoveryPort = 4369
	RabbitMQManagementUIPort  = 15672
	RabbitMQExporterPort      = 15692
	RabbitMQInterNodePort     = 25672

	// Volume names.
	RabbitMQVolumeData         = "data"
	RabbitMQVolumeConfig       = "rabbitmqconfig"
	RabbitMQVolumeTempConfig   = "temp-config"
	RabbitMQVolumeCustomConfig = "custom-config"

	// Directory layout inside the RabbitMQ container.
	RabbitMQDataDir         = "/var/lib/rabbitmq/mnesia"
	RabbitMQConfigDir       = "/config/"
	RabbitMQPluginsDir      = "/etc/rabbitmq/"
	RabbitMQCertDir         = "/var/private/ssl"
	RabbitMQTempConfigDir   = "/tmp/config/"
	RabbitMQCustomConfigDir = "/tmp/config/custom_config/"

	RabbitMQConfigVolName     = "rabbitmq-config"
	RabbitMQPluginsVolName    = "rabbitmq-plugins"
	RabbitMQTempConfigVolName = "temp-config"

	// Container names, plugin names, and rabbitmq.conf keys with their
	// operator-chosen default values.
	RabbitMQContainerName              = "rabbitmq"
	RabbitMQInitContainerName          = "rabbitmq-init"
	RabbitMQManagementPlugin           = "rabbitmq_management"
	RabbitMQPeerdiscoveryPlugin        = "rabbitmq_peer_discovery_k8s"
	RabbitMQFederationPlugin           = "rabbitmq_federation"
	RabbitMQFederationManagementPlugin = "rabbitmq_federation_management"
	RabbitMQShovelPlugin               = "rabbitmq_shovel"
	RabbitMQShovelManagementPlugin     = "rabbitmq_shovel_management"
	RabbitMQWebDispatchPlugin          = "rabbitmq_web_dispatch"
	RabbitMQLoopBackUserKey            = "loopback_users"
	RabbitMQLoopBackUserVal            = "none"
	RabbitMQDefaultTCPListenerKey      = "listeners.tcp.default"
	RabbitMQDefaultSSLListenerKey      = "listeners.ssl.default"
	RabbitMQDefaultTCPListenerVal      = "5672"
	RabbitMQDefaultTLSListenerVal      = "5671"
	RabbitMQQueueMasterLocatorKey      = "queue_master_locator"
	RabbitMQQueueMasterLocatorVal      = "min-masters"
	RabbitMQDiskFreeLimitKey           = "disk_free_limit.absolute"
	RabbitMQDiskFreeLimitVal           = "2GB"
	RabbitMQPartitionHandingKey        = "cluster_partition_handling"
	RabbitMQPartitionHandingVal        = "pause_minority"
	RabbitMQPeerDiscoveryKey           = "cluster_formation.peer_discovery_backend"
	RabbitMQPeerDiscoveryVal           = "rabbit_peer_discovery_k8s"
	RabbitMQK8sHostKey                 = "cluster_formation.k8s.host"
	RabbitMQK8sHostVal                 = "kubernetes.default.svc.cluster.local"
	RabbitMQK8sAddressTypeKey          = "cluster_formation.k8s.address_type"
	RabbitMQK8sAddressTypeVal          = "hostname"
	RabbitMQNodeCleanupWarningKey      = "cluster_formation.node_cleanup.only_log_warning"
	RabbitMQNodeCleanupWarningVal      = "true"
	RabbitMQLogFileLevelKey            = "log.file.level"
	RabbitMQLogFileLevelVal            = "info"
	RabbitMQLogConsoleKey              = "log.console"
	RabbitMQLogConsoleVal              = "true"
	RabbitMQLogConsoleLevelKey         = "log.console.level"
	RabbitMQLogConsoleLevelVal         = "info"
	RabbitMQDefaultUserKey             = "default_user"
	RabbitMQDefaultUserVal             = "$(RABBITMQ_DEFAULT_USER)"
	RabbitMQDefaultPasswordKey         = "default_pass"
	RabbitMQDefaultPasswordVal         = "$(RABBITMQ_DEFAULT_PASS)"
	RabbitMQClusterNameKey             = "cluster_name"
	RabbitMQK8sSvcNameKey              = "cluster_formation.k8s.service_name"
	RabbitMQSSLOptionsCAKey            = "ssl_options.cacertfile"
	RabbitMQSSLOptionsCertKey          = "ssl_options.certfile"
	RabbitMQSSLOptionsPrivateKey       = "ssl_options.keyfile"
	RabbitMQSSLOptionsVerifyKey        = "ssl_options.verify"
	RabbitMQSSLOptionsFailIfNoPeerKey  = "ssl_options.fail_if_no_peer_cert"
	RabbitMQConfigFileName             = "rabbitmq.conf"
	RabbitMQEnabledPluginsFileName     = "enabled_plugins"
	RabbitMQHealthCheckerQueueName     = "kubedb-system"
)
View Source
// FerretDB constants: environment variable names, container/image defaults,
// TLS certificate path, and default ports.
const (

	// envs
	EnvFerretDBUser     = "FERRETDB_PG_USER"
	EnvFerretDBPassword = "FERRETDB_PG_PASSWORD"
	EnvFerretDBHandler  = "FERRETDB_HANDLER"
	EnvFerretDBPgURL    = "FERRETDB_POSTGRESQL_URL"
	EnvFerretDBTLSPort  = "FERRETDB_LISTEN_TLS"
	EnvFerretDBCAPath   = "FERRETDB_LISTEN_TLS_CA_FILE"
	EnvFerretDBCertPath = "FERRETDB_LISTEN_TLS_CERT_FILE"
	EnvFerretDBKeyPath  = "FERRETDB_LISTEN_TLS_KEY_FILE"

	// Container name, upstream image, and default backend user.
	FerretDBContainerName = "ferretdb"
	FerretDBMainImage     = "ghcr.io/ferretdb/ferretdb"
	FerretDBUser          = "postgres"

	// Mount path for server TLS certificates.
	FerretDBServerPath = "/etc/certs/server"

	// Default ports (plain, metrics, TLS) and the metrics endpoint path.
	FerretDBDefaultPort = 27017
	FerretDBMetricsPort = 8080
	FerretDBTLSPort     = 27018

	FerretDBMetricsPath = "/debug/metrics"
)

=========================== FerretDB Constants ============================

View Source
// ClickHouse constants: default ports, data/config directory layout, volume
// and container names, and generated config file names.
const (
	// Default ports: keeper, HTTP, HTTPS, native TCP, native TLS, Prometheus.
	ClickHouseKeeperPort  = 9181
	ClickHouseDefaultHTTP = 8123
	ClickHouseDefaultTLS  = 8443
	ClickHouseNativeTCP   = 9000
	ClickHouseNativeTLS   = 9440

	// Deprecated: misspelled; use ClickHousePrometheusPort instead.
	ClickhousePromethues = 9363
	// ClickHousePrometheusPort is the Prometheus metrics port. It is the
	// correctly spelled, backward-compatible alias for ClickhousePromethues.
	ClickHousePrometheusPort = 9363

	// Data directory and server config layout.
	ClickHouseVolumeData         = "data"
	ClickHouseDataDir            = "/var/lib/clickhouse"
	ClickHouseConfigVolName      = "clickhouse-config"
	ClickHouseConfigDir          = "/etc/clickhouse-server/config.d"
	ClickHouseDefaultStorageSize = "2Gi"

	// Cluster-topology config volume and directory.
	ClickHouseClusterConfigVolName = "cluster-config"
	ClickHouseClusterConfigDir     = "/etc/clickhouse-server/conf.d"

	ClickHouseTempClusterConfigVolName = "temp-cluster-config"

	// Container names.
	ClickHouseContainerName     = "clickhouse"
	ClickHouseInitContainerName = "clickhouse-init"

	// Generated config file names and temp directories.
	ClickHouseClusterConfigFile = "cluster-config.yaml"
	ClickHouseTempConfigDir     = "/ch-tmp/config"
	ClickHouseTempDir           = "/ch-tmp"

	ClickHouseUserConfigDir  = "/etc/clickhouse-server/user.d"
	ClickHouseMacrosFileName = "macros.yaml"

	// Deployment topologies.
	ClickHouseStandalone = "standalone"
	ClickHouseCluster    = "cluster"

	// Database/table names used by the operator's health checker.
	ClickHouseHealthCheckerDatabase = "kubedb_system_db"
	ClickHouseHealthCheckerTable    = "kubedb_system_table"

	ClickHouseServerConfigFile = "server-config.yaml"
	ClickHouseKeeperFileConfig = "keeper-config.yaml"
)
View Source
// Kubernetes workload kinds the operator deploys databases on.
const (
	ResourceKindStatefulSet = "StatefulSet"
	ResourceKindPetSet      = "PetSet"
)

Resource kind related constants

View Source
// git-sync init volume/container names and mount paths.
const (
	InitFromGit          = "init-from-git"
	InitFromGitMountPath = "/git"
	GitSecretVolume      = "git-secret"
	GitSecretMountPath   = "/etc/git-secret"
	GitSyncContainerName = "git-sync"
)
View Source
// Druid resource metadata (short code, kind, singular/plural names).
const (
	ResourceCodeDruid     = "dr"
	ResourceKindDruid     = "Druid"
	ResourceSingularDruid = "druid"
	ResourcePluralDruid   = "druids"
)
View Source
// Elasticsearch resource metadata (short code, kind, singular/plural names).
const (
	ResourceCodeElasticsearch     = "es"
	ResourceKindElasticsearch     = "Elasticsearch"
	ResourceSingularElasticsearch = "elasticsearch"
	ResourcePluralElasticsearch   = "elasticsearches"
)
View Source
// Etcd resource metadata (short code, kind, singular/plural names).
const (
	ResourceCodeEtcd     = "etc"
	ResourceKindEtcd     = "Etcd"
	ResourceSingularEtcd = "etcd"
	ResourcePluralEtcd   = "etcds"
)
View Source
// FerretDB resource metadata (short code, kind, singular/plural names).
const (
	ResourceCodeFerretDB     = "fr"
	ResourceKindFerretDB     = "FerretDB"
	ResourceSingularFerretDB = "ferretdb"
	ResourcePluralFerretDB   = "ferretdbs"
)
View Source
// Kafka resource metadata (short code, kind, singular/plural names).
const (
	ResourceCodeKafka     = "kf"
	ResourceKindKafka     = "Kafka"
	ResourceSingularKafka = "kafka"
	ResourcePluralKafka   = "kafkas"
)
View Source
// MariaDB resource metadata (short code, kind, singular/plural names).
const (
	ResourceCodeMariaDB     = "md"
	ResourceKindMariaDB     = "MariaDB"
	ResourceSingularMariaDB = "mariadb"
	ResourcePluralMariaDB   = "mariadbs"
)
View Source
// Memcached resource metadata (short code, kind, singular/plural names).
const (
	ResourceCodeMemcached     = "mc"
	ResourceKindMemcached     = "Memcached"
	ResourceSingularMemcached = "memcached"
	ResourcePluralMemcached   = "memcacheds"
)
View Source
// MongoDB TLS file names/paths, node-type label keys, and the shard affinity
// template variable.
const (
	TLSCAKeyFileName    = "ca.key"
	TLSCACertFileName   = "ca.crt"
	MongoPemFileName    = "mongo.pem"
	MongoClientFileName = "client.pem"
	MongoCertDirectory  = "/var/run/mongodb/tls"

	// Label keys distinguishing shard, config-server, mongos, and node type.
	MongoDBShardLabelKey  = "mongodb.kubedb.com/node.shard"
	MongoDBConfigLabelKey = "mongodb.kubedb.com/node.config"
	MongoDBMongosLabelKey = "mongodb.kubedb.com/node.mongos"
	MongoDBTypeLabelKey   = "mongodb.kubedb.com/node.type"

	// Placeholder substituted with the shard index in affinity templates.
	MongoDBShardAffinityTemplateVar = "SHARD_INDEX"
)
View Source
// MongoDB resource metadata (short code, kind, singular/plural names).
const (
	ResourceCodeMongoDB     = "mg"
	ResourceKindMongoDB     = "MongoDB"
	ResourceSingularMongoDB = "mongodb"
	ResourcePluralMongoDB   = "mongodbs"
)
View Source
// MSSQLServer resource metadata (short code, kind, singular/plural names).
const (
	ResourceCodeMSSQLServer     = "ms"
	ResourceKindMSSQLServer     = "MSSQLServer"
	ResourceSingularMSSQLServer = "mssqlserver"
	ResourcePluralMSSQLServer   = "mssqlservers"
)
View Source
// MySQL resource metadata (short code, kind, singular/plural names).
const (
	ResourceCodeMySQL     = "my"
	ResourceKindMySQL     = "MySQL"
	ResourceSingularMySQL = "mysql"
	ResourcePluralMySQL   = "mysqls"
)
View Source
// PerconaXtraDB resource metadata (short code, kind, singular/plural names).
const (
	ResourceCodePerconaXtraDB     = "px"
	ResourceKindPerconaXtraDB     = "PerconaXtraDB"
	ResourceSingularPerconaXtraDB = "perconaxtradb"
	ResourcePluralPerconaXtraDB   = "perconaxtradbs"
)
View Source
// PgBouncer resource metadata (short code, kind, singular/plural names).
const (
	ResourceCodePgBouncer     = "pb"
	ResourceKindPgBouncer     = "PgBouncer"
	ResourceSingularPgBouncer = "pgbouncer"
	ResourcePluralPgBouncer   = "pgbouncers"
)
View Source
// Pgpool resource metadata (short code, kind, singular/plural names).
const (
	ResourceCodePgpool     = "pp"
	ResourceKindPgpool     = "Pgpool"
	ResourceSingularPgpool = "pgpool"
	ResourcePluralPgpool   = "pgpools"
)
View Source
const (
	ResourceCodePostgres     = "pg"
	ResourceKindPostgres     = "Postgres"
	ResourceSingularPostgres = "postgres"
	ResourcePluralPostgres   = "postgreses"
)
View Source
const (
	ResourceCodeProxySQL     = "prx"
	ResourceKindProxySQL     = "ProxySQL"
	ResourceSingularProxySQL = "proxysql"
	ResourcePluralProxySQL   = "proxysqls"
)
View Source
const (
	ResourceCodeRabbitmq     = "rm"
	ResourceKindRabbitmq     = "RabbitMQ"
	ResourceSingularRabbitmq = "rabbitmq"
	ResourcePluralRabbitmq   = "rabbitmqs"
)
View Source
const (
	ResourceCodeRedisSentinel     = "rds"
	ResourceKindRedisSentinel     = "RedisSentinel"
	ResourceSingularRedisSentinel = "redissentinel"
	ResourcePluralRedisSentinel   = "redissentinels"
)
View Source
const (
	ResourceCodeRedis     = "rd"
	ResourceKindRedis     = "Redis"
	ResourceSingularRedis = "redis"
	ResourcePluralRedis   = "redises"
)
View Source
const (
	ResourceCodeSinglestore     = "sdb"
	ResourceKindSinglestore     = "Singlestore"
	ResourceSingularSinglestore = "singlestore"
	ResourcePluralSinglestore   = "singlestores"
)
View Source
const (
	ResourceCodeZooKeeper     = "zk"
	ResourceKindZooKeeper     = "ZooKeeper"
	ResourceSingularZooKeeper = "zookeeper"
	ResourcePluralZooKeeper   = "zookeepers"
)
View Source
const (
	ElasticsearchNodeAffinityTemplateVar = "NODE_ROLE"
)
View Source
const (
	RedisShardAffinityTemplateVar = "SHARD_INDEX"
)

Variables

View Source
var (
	DefaultInitContainerResource = core.ResourceRequirements{
		Requests: core.ResourceList{
			core.ResourceCPU:    resource.MustParse(".200"),
			core.ResourceMemory: resource.MustParse("256Mi"),
		},
		Limits: core.ResourceList{
			core.ResourceMemory: resource.MustParse("512Mi"),
		},
	}
	DefaultResources = core.ResourceRequirements{
		Requests: core.ResourceList{
			core.ResourceCPU:    resource.MustParse(".500"),
			core.ResourceMemory: resource.MustParse("1024Mi"),
		},
		Limits: core.ResourceList{
			core.ResourceMemory: resource.MustParse("1024Mi"),
		},
	}
	// CoordinatorDefaultResources must be used for raft backed coordinators to avoid unintended leader switches
	CoordinatorDefaultResources = core.ResourceRequirements{
		Requests: core.ResourceList{
			core.ResourceCPU:    resource.MustParse(".200"),
			core.ResourceMemory: resource.MustParse("256Mi"),
		},
		Limits: core.ResourceList{
			core.ResourceMemory: resource.MustParse("256Mi"),
		},
	}

	// DefaultResourcesCPUIntensive is for MongoDB versions >= 6
	DefaultResourcesCPUIntensive = core.ResourceRequirements{
		Requests: core.ResourceList{
			core.ResourceCPU:    resource.MustParse(".800"),
			core.ResourceMemory: resource.MustParse("1024Mi"),
		},
		Limits: core.ResourceList{
			core.ResourceMemory: resource.MustParse("1024Mi"),
		},
	}

	// DefaultResourcesMemoryIntensive must be used for Elasticsearch
	// to avoid being OOMKilled while deploying ES V8
	DefaultResourcesMemoryIntensive = core.ResourceRequirements{
		Requests: core.ResourceList{
			core.ResourceCPU:    resource.MustParse(".500"),
			core.ResourceMemory: resource.MustParse("1.5Gi"),
		},
		Limits: core.ResourceList{
			core.ResourceMemory: resource.MustParse("1.5Gi"),
		},
	}

	// DefaultResourcesCoreAndMemoryIntensiveSolr must be used for Solr
	DefaultResourcesCoreAndMemoryIntensiveSolr = core.ResourceRequirements{
		Requests: core.ResourceList{
			core.ResourceCPU:    resource.MustParse(".900"),
			core.ResourceMemory: resource.MustParse("2Gi"),
		},
		Limits: core.ResourceList{
			core.ResourceMemory: resource.MustParse("2Gi"),
		},
	}

	// DefaultResourcesMemoryIntensiveSDB must be used for Singlestore when enabled monitoring or version >= 8.5.x
	DefaultResourcesMemoryIntensiveSDB = core.ResourceRequirements{
		Requests: core.ResourceList{
			core.ResourceCPU:    resource.MustParse(".500"),
			core.ResourceMemory: resource.MustParse("2Gi"),
		},
		Limits: core.ResourceList{
			core.ResourceMemory: resource.MustParse("2Gi"),
		},
	}

	// DefaultResourcesMemoryIntensiveDruid must be used for Druid MiddleManagers
	DefaultResourcesMemoryIntensiveDruid = core.ResourceRequirements{
		Requests: core.ResourceList{
			core.ResourceCPU:    resource.MustParse(".500"),
			core.ResourceMemory: resource.MustParse("2.5Gi"),
		},
		Limits: core.ResourceList{
			core.ResourceMemory: resource.MustParse("2.5Gi"),
		},
	}
)
View Source
var (
	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
	SchemeBuilder runtime.SchemeBuilder

	AddToScheme = localSchemeBuilder.AddToScheme
)
View Source
var (
	DefaultClient client.Client
)
View Source
var PgpoolReservedVolumes = []string{
	PgpoolConfigVolumeName,
	PgpoolTlsVolumeName,
}
View Source
var PgpoolReservedVolumesMountPaths = []string{
	PgpoolConfigSecretMountPath,
	PgpoolTlsVolumeMountPath,
}
View Source
var SchemeGroupVersion = schema.GroupVersion{Group: kubedb.GroupName, Version: "v1alpha2"}

Functions

func DefaultArbiter added in v0.38.0

func DefaultArbiter(computeOnly bool) core.ResourceRequirements

func FerretDBValidateEnvVar added in v0.41.0

func FerretDBValidateEnvVar(envs []core.EnvVar, forbiddenEnvs []string, resourceType string) error

func GetDatabasePods added in v0.24.0

func GetDatabasePods(db metav1.Object, stsLister appslister.StatefulSetLister, pods []core.Pod) ([]core.Pod, error)

func GetDatabasePodsByPetSetLister added in v0.44.0

func GetDatabasePodsByPetSetLister(db metav1.Object, psLister pslister.PetSetLister, pods []core.Pod) ([]core.Pod, error)

func GetServiceTemplate added in v0.15.0

func GetServiceTemplate(templates []NamedServiceTemplateSpec, alias ServiceAlias) ofst.ServiceTemplateSpec

GetServiceTemplate returns a pointer to the desired serviceTemplate referred to by "alias". Otherwise, it returns nil.

func GetSharedBufferSizeForPostgres added in v0.19.0

func GetSharedBufferSizeForPostgres(resource *resource.Quantity) string

GetSharedBufferSizeForPostgres takes an input of type int64, expressed in bytes, and returns 25% of the input, also in bytes.

func HasServiceTemplate added in v0.15.0

func HasServiceTemplate(templates []NamedServiceTemplateSpec, alias ServiceAlias) bool

HasServiceTemplate returns "true" if the desired serviceTemplate provided in "alias" is present in the serviceTemplate list. Otherwise, it returns "false".

func Kind

func Kind(kind string) schema.GroupKind

Kind takes an unqualified kind and returns a Group qualified GroupKind

func MySQLExporterTLSArg added in v0.16.0

func MySQLExporterTLSArg() string

func MySQLRequireSSLArg added in v0.16.0

func MySQLRequireSSLArg() string

func PgpoolGetMainContainerEnvs added in v0.41.0

func PgpoolGetMainContainerEnvs(p *Pgpool) []core.EnvVar

func PgpoolValidateVersion added in v0.41.0

func PgpoolValidateVersion(p *Pgpool) error

func PgpoolValidateVolumes added in v0.41.0

func PgpoolValidateVolumes(p *Pgpool) error

func PgpoolValidateVolumesMountPaths added in v0.41.0

func PgpoolValidateVolumesMountPaths(podTemplate *ofst.PodTemplateSpec) error

func Resource

func Resource(resource string) schema.GroupResource

Resource takes an unqualified resource and returns a Group qualified GroupResource

func SetDefaultClient added in v0.38.0

func SetDefaultClient(kc client.Client)

func UsesAcmeIssuer added in v0.32.0

func UsesAcmeIssuer(kc client.Client, ns string, issuerRef core.TypedLocalObjectReference) (bool, error)

Types

type AddressType added in v0.18.0

type AddressType string

+kubebuilder:validation:Enum=DNS;IP;IPv4;IPv6

const (
	AddressTypeDNS AddressType = "DNS"
	// Uses spec.podIP as address for db pods.
	AddressTypeIP AddressType = "IP"
	// Uses first IPv4 address from spec.podIP, spec.podIPs fields as address for db pods.
	AddressTypeIPv4 AddressType = "IPv4"
	// Uses first IPv6 address from spec.podIP, spec.podIPs fields as address for db pods.
	AddressTypeIPv6 AddressType = "IPv6"
)

func (AddressType) IsIP added in v0.18.0

func (a AddressType) IsIP() bool

type Age added in v0.29.0

type Age struct {
	// Populated by the Provisioner when authSecret is created, or by the Ops Manager when authSecret is updated.
	LastUpdateTimestamp metav1.Time `json:"lastUpdateTimestamp,omitempty"`
}

func (*Age) DeepCopy added in v0.29.0

func (in *Age) DeepCopy() *Age

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Age.

func (*Age) DeepCopyInto added in v0.29.0

func (in *Age) DeepCopyInto(out *Age)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type AllowedConsumers added in v0.25.0

type AllowedConsumers struct {
	// Namespaces indicates namespaces from which Consumers may be attached to
	//
	// +optional
	// +kubebuilder:default={from: Same}
	Namespaces *ConsumerNamespaces `json:"namespaces,omitempty"`

	// Selector specifies a selector for consumers that are allowed to bind
	// to this database instance.
	//
	// +optional
	Selector *metav1.LabelSelector `json:"selector,omitempty"`
}

AllowedConsumers defines which consumers may refer to a database instance.

func (*AllowedConsumers) DeepCopy added in v0.25.0

func (in *AllowedConsumers) DeepCopy() *AllowedConsumers

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedConsumers.

func (*AllowedConsumers) DeepCopyInto added in v0.25.0

func (in *AllowedConsumers) DeepCopyInto(out *AllowedConsumers)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type ArbiterSpec added in v0.38.0

type ArbiterSpec struct {
	// Compute Resources required by the sidecar container.
	// +optional
	Resources core.ResourceRequirements `json:"resources,omitempty"`
	// NodeSelector is a selector which must be true for the pod to fit on a node.
	// Selector which must match a node's labels for the pod to be scheduled on that node.
	// More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
	// +optional
	// +mapType=atomic
	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
	// If specified, the pod's tolerations.
	// +optional
	Tolerations []core.Toleration `json:"tolerations,omitempty"`
}

func (*ArbiterSpec) DeepCopy added in v0.38.0

func (in *ArbiterSpec) DeepCopy() *ArbiterSpec

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArbiterSpec.

func (*ArbiterSpec) DeepCopyInto added in v0.38.0

func (in *ArbiterSpec) DeepCopyInto(out *ArbiterSpec)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type Archiver added in v0.38.0

type Archiver struct {
	// Pause is used to stop the archiver backup for the database
	// +optional
	Pause bool `json:"pause,omitempty"`
	// Ref is the name and namespace reference to the Archiver CR
	Ref kmapi.ObjectReference `json:"ref"`
}

func (*Archiver) DeepCopy added in v0.38.0

func (in *Archiver) DeepCopy() *Archiver

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Archiver.

func (*Archiver) DeepCopyInto added in v0.38.0

func (in *Archiver) DeepCopyInto(out *Archiver)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type ArchiverRecovery added in v0.38.0

type ArchiverRecovery struct {
	RecoveryTimestamp metav1.Time `json:"recoveryTimestamp"`
	// +optional
	EncryptionSecret *kmapi.ObjectReference `json:"encryptionSecret,omitempty"`
	// +optional
	ManifestRepository *kmapi.ObjectReference `json:"manifestRepository,omitempty"`

	// FullDBRepository means db restore + manifest restore
	FullDBRepository *kmapi.ObjectReference `json:"fullDBRepository,omitempty"`
}

func (*ArchiverRecovery) DeepCopy added in v0.38.0

func (in *ArchiverRecovery) DeepCopy() *ArchiverRecovery

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArchiverRecovery.

func (*ArchiverRecovery) DeepCopyInto added in v0.38.0

func (in *ArchiverRecovery) DeepCopyInto(out *ArchiverRecovery)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type AutoOpsSpec added in v0.28.0

type AutoOpsSpec struct {
	// Disabled specifies whether the ops-request recommendation generation will be disabled or not.
	// +optional
	Disabled bool `json:"disabled,omitempty"`
}

AutoOpsSpec defines the specifications of automatic ops-request recommendation generation

func (*AutoOpsSpec) DeepCopy added in v0.28.0

func (in *AutoOpsSpec) DeepCopy() *AutoOpsSpec

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoOpsSpec.

func (*AutoOpsSpec) DeepCopyInto added in v0.28.0

func (in *AutoOpsSpec) DeepCopyInto(out *AutoOpsSpec)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type ClickHouse added in v0.46.0

type ClickHouse struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   ClickHouseSpec   `json:"spec,omitempty"`
	Status ClickHouseStatus `json:"status,omitempty"`
}

+kubebuilder:object:root=true +kubebuilder:subresource:status +kubebuilder:resource:shortName=ch,scope=Namespaced +kubebuilder:printcolumn:name="Type",type="string",JSONPath=".apiVersion" +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version" +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase" +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"

func (*ClickHouse) AppBindingMeta added in v0.46.0

func (c *ClickHouse) AppBindingMeta() appcat.AppBindingMeta

func (*ClickHouse) ClusterGoverningServiceDNS added in v0.46.0

func (c *ClickHouse) ClusterGoverningServiceDNS(petSetName string, replicaNo int) string

func (*ClickHouse) ClusterGoverningServiceName added in v0.46.0

func (c *ClickHouse) ClusterGoverningServiceName(name string) string

func (*ClickHouse) ClusterPodLabels added in v0.46.0

func (c *ClickHouse) ClusterPodLabels(petSetName string, labels map[string]string, extraLabels ...map[string]string) map[string]string

func (*ClickHouse) ConfigSecretName added in v0.46.0

func (r *ClickHouse) ConfigSecretName() string

func (*ClickHouse) CustomResourceDefinition added in v0.46.0

func (r *ClickHouse) CustomResourceDefinition() *apiextensions.CustomResourceDefinition

func (*ClickHouse) DeepCopy added in v0.46.0

func (in *ClickHouse) DeepCopy() *ClickHouse

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClickHouse.

func (*ClickHouse) DeepCopyInto added in v0.46.0

func (in *ClickHouse) DeepCopyInto(out *ClickHouse)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

func (*ClickHouse) DeepCopyObject added in v0.46.0

func (in *ClickHouse) DeepCopyObject() runtime.Object

DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.

func (*ClickHouse) Default added in v0.46.0

func (r *ClickHouse) Default()

Default implements webhook.Defaulter so a webhook will be registered for the type

func (*ClickHouse) DefaultUserCredSecretName added in v0.46.0

func (c *ClickHouse) DefaultUserCredSecretName(username string) string

func (*ClickHouse) Finalizer added in v0.46.0

func (c *ClickHouse) Finalizer() string

func (*ClickHouse) GetAuthSecretName added in v0.46.0

func (c *ClickHouse) GetAuthSecretName() string

func (*ClickHouse) GetConnectionScheme added in v0.46.0

func (c *ClickHouse) GetConnectionScheme() string

func (*ClickHouse) GoverningServiceName added in v0.46.0

func (c *ClickHouse) GoverningServiceName() string

func (*ClickHouse) OffshootClusterLabels added in v0.46.0

func (c *ClickHouse) OffshootClusterLabels(petSetName string) map[string]string

func (*ClickHouse) OffshootClusterName added in v0.46.0

func (c *ClickHouse) OffshootClusterName(value string) string

func (*ClickHouse) OffshootClusterPetSetName added in v0.46.0

func (c *ClickHouse) OffshootClusterPetSetName(clusterName string, shardNo int) string

func (*ClickHouse) OffshootClusterSelectors added in v0.46.0

func (c *ClickHouse) OffshootClusterSelectors(petSetName string, extraSelectors ...map[string]string) map[string]string

func (*ClickHouse) OffshootLabels added in v0.46.0

func (c *ClickHouse) OffshootLabels() map[string]string

func (*ClickHouse) OffshootName added in v0.46.0

func (c *ClickHouse) OffshootName() string

func (*ClickHouse) OffshootSelectors added in v0.46.0

func (c *ClickHouse) OffshootSelectors(extraSelectors ...map[string]string) map[string]string

func (*ClickHouse) Owner added in v0.46.0

func (c *ClickHouse) Owner() *meta.OwnerReference

Owner returns owner reference to resources

func (*ClickHouse) PVCName added in v0.46.0

func (c *ClickHouse) PVCName(alias string) string

func (*ClickHouse) PetSetName added in v0.46.0

func (c *ClickHouse) PetSetName() string

func (*ClickHouse) PodLabels added in v0.46.0

func (c *ClickHouse) PodLabels(extraLabels ...map[string]string) map[string]string

func (*ClickHouse) PrimaryServiceDNS added in v0.46.0

func (c *ClickHouse) PrimaryServiceDNS() string

func (*ClickHouse) ResourceFQN added in v0.46.0

func (c *ClickHouse) ResourceFQN() string

func (*ClickHouse) ResourceKind added in v0.46.0

func (c *ClickHouse) ResourceKind() string

func (*ClickHouse) ResourcePlural added in v0.46.0

func (c *ClickHouse) ResourcePlural() string

func (*ClickHouse) ResourceSingular added in v0.46.0

func (c *ClickHouse) ResourceSingular() string

func (*ClickHouse) ServiceName added in v0.46.0

func (c *ClickHouse) ServiceName() string

func (*ClickHouse) SetDefaults added in v0.46.0

func (c *ClickHouse) SetDefaults()

func (*ClickHouse) SetHealthCheckerDefaults added in v0.46.0

func (c *ClickHouse) SetHealthCheckerDefaults()

func (*ClickHouse) ValidateCreate added in v0.46.0

func (r *ClickHouse) ValidateCreate() (admission.Warnings, error)

ValidateCreate implements webhook.Validator so a webhook will be registered for the type

func (*ClickHouse) ValidateCreateOrUpdate added in v0.46.0

func (r *ClickHouse) ValidateCreateOrUpdate() error

func (*ClickHouse) ValidateDelete added in v0.46.0

func (r *ClickHouse) ValidateDelete() (admission.Warnings, error)

ValidateDelete implements webhook.Validator so a webhook will be registered for the type

func (*ClickHouse) ValidateUpdate added in v0.46.0

func (r *ClickHouse) ValidateUpdate(old runtime.Object) (admission.Warnings, error)

ValidateUpdate implements webhook.Validator so a webhook will be registered for the type

func (*ClickHouse) ValidateVersion added in v0.46.0

func (r *ClickHouse) ValidateVersion(db *ClickHouse) error

type ClickHouseKeeperConfig added in v0.46.0

type ClickHouseKeeperConfig struct {
	Node ClickHouseKeeperNode `json:"node,omitempty"`
}

func (*ClickHouseKeeperConfig) DeepCopy added in v0.46.0

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClickHouseKeeperConfig.

func (*ClickHouseKeeperConfig) DeepCopyInto added in v0.46.0

func (in *ClickHouseKeeperConfig) DeepCopyInto(out *ClickHouseKeeperConfig)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type ClickHouseKeeperNode added in v0.46.0

type ClickHouseKeeperNode struct {
	Host string `json:"host,omitempty"`

	// +optional
	Port int32 `json:"port,omitempty"`
}

ClickHouseKeeperNode defines an item of the nodes section of .spec.clusterTopology.

func (*ClickHouseKeeperNode) DeepCopy added in v0.46.0

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClickHouseKeeperNode.

func (*ClickHouseKeeperNode) DeepCopyInto added in v0.46.0

func (in *ClickHouseKeeperNode) DeepCopyInto(out *ClickHouseKeeperNode)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type ClickHouseList added in v0.46.0

type ClickHouseList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []ClickHouse `json:"items"`
}

ClickHouseList contains a list of ClickHouse

func (*ClickHouseList) DeepCopy added in v0.46.0

func (in *ClickHouseList) DeepCopy() *ClickHouseList

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClickHouseList.

func (*ClickHouseList) DeepCopyInto added in v0.46.0

func (in *ClickHouseList) DeepCopyInto(out *ClickHouseList)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

func (*ClickHouseList) DeepCopyObject added in v0.46.0

func (in *ClickHouseList) DeepCopyObject() runtime.Object

DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.

type ClickHouseSpec added in v0.46.0

type ClickHouseSpec struct {
	// Version of ClickHouse to be deployed.
	Version string `json:"version"`

	// Number of instances to deploy for a ClickHouse database.
	// +optional
	Replicas *int32 `json:"replicas,omitempty"`

	// Cluster
	// +optional
	ClusterTopology *ClusterTopology `json:"clusterTopology,omitempty"`

	// StorageType can be durable (default) or ephemeral
	StorageType StorageType `json:"storageType,omitempty"`

	// Storage to specify how storage shall be used.
	Storage *core.PersistentVolumeClaimSpec `json:"storage,omitempty"`

	// DisableSecurity disables the authentication security of the user.
	// If unset, the default is false.
	// +optional
	DisableSecurity bool `json:"disableSecurity,omitempty"`

	// Database authentication secret
	// +optional
	AuthSecret *SecretReference `json:"authSecret,omitempty"`

	// PodTemplate is an optional configuration for pods used to expose database
	// +optional
	PodTemplate *ofst.PodTemplateSpec `json:"podTemplate,omitempty"`

	// ServiceTemplates is an optional configuration for services used to expose database
	// +optional
	ServiceTemplates []NamedServiceTemplateSpec `json:"serviceTemplates,omitempty"`

	// DeletionPolicy controls the delete operation for database
	// +optional
	DeletionPolicy TerminationPolicy `json:"deletionPolicy,omitempty"`

	// HealthChecker defines attributes of the health checker
	// +optional
	// +kubebuilder:default={periodSeconds: 20, timeoutSeconds: 10, failureThreshold: 3}
	HealthChecker kmapi.HealthCheckSpec `json:"healthChecker"`
}

ClickHouseSpec defines the desired state of ClickHouse

func (*ClickHouseSpec) DeepCopy added in v0.46.0

func (in *ClickHouseSpec) DeepCopy() *ClickHouseSpec

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClickHouseSpec.

func (*ClickHouseSpec) DeepCopyInto added in v0.46.0

func (in *ClickHouseSpec) DeepCopyInto(out *ClickHouseSpec)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type ClickHouseStatus added in v0.46.0

type ClickHouseStatus struct {
	// Specifies the current phase of the database
	// +optional
	Phase DatabasePhase `json:"phase,omitempty"`
	// observedGeneration is the most recent generation observed for this resource. It corresponds to the
	// resource's generation, which is updated on mutation by the API Server.
	// +optional
	ObservedGeneration int64 `json:"observedGeneration,omitempty"`
	// Conditions applied to the database, such as approval or denial.
	// +optional
	Conditions []kmapi.Condition `json:"conditions,omitempty"`
	// +optional
	Gateway *Gateway `json:"gateway,omitempty"`
}

ClickHouseStatus defines the observed state of ClickHouse

func (*ClickHouseStatus) DeepCopy added in v0.46.0

func (in *ClickHouseStatus) DeepCopy() *ClickHouseStatus

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClickHouseStatus.

func (*ClickHouseStatus) DeepCopyInto added in v0.46.0

func (in *ClickHouseStatus) DeepCopyInto(out *ClickHouseStatus)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type ClickhouseApp added in v0.46.0

type ClickhouseApp struct {
	*ClickHouse
}

func (*ClickhouseApp) DeepCopy added in v0.46.0

func (in *ClickhouseApp) DeepCopy() *ClickhouseApp

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClickhouseApp.

func (*ClickhouseApp) DeepCopyInto added in v0.46.0

func (in *ClickhouseApp) DeepCopyInto(out *ClickhouseApp)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

func (ClickhouseApp) Name added in v0.46.0

func (r ClickhouseApp) Name() string

func (ClickhouseApp) Type added in v0.46.0

func (r ClickhouseApp) Type() appcat.AppType

type ClusterAuthMode

type ClusterAuthMode string

ClusterAuthMode represents the clusterAuthMode of mongodb clusters ( replicaset or sharding) ref: https://docs.mongodb.com/manual/reference/program/mongod/#cmdoption-mongod-clusterauthmode +kubebuilder:validation:Enum=keyFile;sendKeyFile;sendX509;x509

const (
	// ClusterAuthModeKeyFile represents `keyFile` mongodb clusterAuthMode. In this mode, Use a keyfile for authentication. Accept only keyfiles.
	ClusterAuthModeKeyFile ClusterAuthMode = "keyFile"

	// ClusterAuthModeSendKeyFile represents `sendKeyFile` mongodb clusterAuthMode.
	// This mode is for rolling upgrade purposes. Send a keyfile for authentication but can accept both keyfiles
	// and x.509 certificates.
	ClusterAuthModeSendKeyFile ClusterAuthMode = "sendKeyFile"

	// ClusterAuthModeSendX509 represents `sendx509` mongodb clusterAuthMode. This mode is usually for rolling upgrade purposes.
	// Send the x.509 certificate for authentication but can accept both keyfiles and x.509 certificates.
	ClusterAuthModeSendX509 ClusterAuthMode = "sendX509"

	// ClusterAuthModeX509 represents `x509` mongodb clusterAuthMode. This is the recommended clusterAuthMode.
	// Send the x.509 certificate for authentication and accept only x.509 certificates.
	ClusterAuthModeX509 ClusterAuthMode = "x509"
)

type ClusterSpec added in v0.46.0

type ClusterSpec struct {
	// Cluster Name
	Name string `json:"name,omitempty"`
	// Number of replicas for each shard to deploy for a cluster.
	// +optional
	Replicas *int32 `json:"replicas,omitempty"`

	// Number of shards to deploy for a cluster.
	// +optional
	Shards *int32 `json:"shards,omitempty"`

	// PodTemplate is an optional configuration for pods used to expose database
	// +optional
	PodTemplate *ofst.PodTemplateSpec `json:"podTemplate,omitempty"`

	// Storage to specify how storage shall be used.
	Storage *core.PersistentVolumeClaimSpec `json:"storage,omitempty"`

	// StorageType can be durable (default) or ephemeral
	StorageType StorageType `json:"storageType,omitempty"`
}

func (*ClusterSpec) DeepCopy added in v0.46.0

func (in *ClusterSpec) DeepCopy() *ClusterSpec

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec.

func (*ClusterSpec) DeepCopyInto added in v0.46.0

func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type ClusterTopology added in v0.46.0

type ClusterTopology struct {
	// ClickHouse cluster structure
	Cluster []ClusterSpec `json:"cluster,omitempty"`

	// ClickHouse Keeper server name
	ClickHouseKeeper *ClickHouseKeeperConfig `json:"clickHouseKeeper,omitempty"`
}

func (*ClusterTopology) DeepCopy added in v0.46.0

func (in *ClusterTopology) DeepCopy() *ClusterTopology

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTopology.

func (*ClusterTopology) DeepCopyInto added in v0.46.0

func (in *ClusterTopology) DeepCopyInto(out *ClusterTopology)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type ConnectionPoolConfig

type ConnectionPoolConfig struct {
	// Port is the port number on which PgBouncer listens to clients. Default: 5432.
	// +kubebuilder:default=5432
	// +optional
	Port *int32 `json:"port,omitempty"`
	// PoolMode is the pooling mechanism type. Default: session.
	// +kubebuilder:default="session"
	// +optional
	PoolMode string `json:"poolMode,omitempty"`
	// MaxClientConnections is the maximum number of allowed client connections. Default: 100.
	// +kubebuilder:default=100
	// +optional
	MaxClientConnections *int64 `json:"maxClientConnections,omitempty"`
	// DefaultPoolSize specifies how many server connections to allow per user/database pair. Default: 20.
	// +kubebuilder:default=20
	// +optional
	DefaultPoolSize *int64 `json:"defaultPoolSize,omitempty"`
	// MinPoolSize is used to add more server connections to pool if below this number. Default: 0 (disabled).
	// +kubebuilder:default=0
	// +optional
	MinPoolSize *int64 `json:"minPoolSize,omitempty"`
	// ReservePoolSize specifies how many additional connections to allow to a pool. 0 disables. Default: 0 (disabled).
	// +kubebuilder:default=0
	// +optional
	ReservePoolSize *int64 `json:"reservePoolSize,omitempty"`
	// ReservePoolTimeoutSeconds is the number of seconds in which if a client has not been serviced,
	// pgbouncer enables use of additional connections from reserve pool. 0 disables. Default: 5.0.
	// +kubebuilder:default=5
	// +optional
	ReservePoolTimeoutSeconds *int64 `json:"reservePoolTimeoutSeconds,omitempty"`
	// MaxDBConnections is the maximum number of connections allowed per-database. Default: 0 (unlimited).
	// +kubebuilder:default=0
	// +optional
	MaxDBConnections *int64 `json:"maxDBConnections,omitempty"`
	// MaxUserConnections is the maximum number of users allowed per-database. Default: 0 (unlimited).
	// +kubebuilder:default=0
	// +optional
	MaxUserConnections *int64 `json:"maxUserConnections,omitempty"`
	// StatsPeriodSeconds sets how often the averages shown in various SHOW commands are updated
	// and how often aggregated statistics are written to the log. Default: 60
	// +kubebuilder:default=60
	// +optional
	StatsPeriodSeconds *int64 `json:"statsPeriodSeconds,omitempty"`
	// AuthType specifies how to authenticate users. Default: md5 (md5+plain text).
	// +kubebuilder:default=md5
	// +optional
	AuthType PgBouncerClientAuthMode `json:"authType,omitempty"`
	// IgnoreStartupParameters specifies comma-separated startup parameters that
	// pgbouncer knows are handled by admin and it can ignore them. Default: empty
	// +kubebuilder:default="empty"
	// +optional
	IgnoreStartupParameters string `json:"ignoreStartupParameters,omitempty"`
}

func (*ConnectionPoolConfig) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionPoolConfig.

func (*ConnectionPoolConfig) DeepCopyInto

func (in *ConnectionPoolConfig) DeepCopyInto(out *ConnectionPoolConfig)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type ConsumerNamespaces added in v0.25.0

type ConsumerNamespaces struct {
	// From indicates where Consumers will be selected for the database instance. Possible
	// values are:
	// * All: Consumers in all namespaces.
	// * Selector: Consumers in namespaces selected by the selector
	// * Same: Only Consumers in the same namespace
	//
	// +optional
	// +kubebuilder:default=Same
	From *FromNamespaces `json:"from,omitempty"`

	// Selector must be specified when From is set to "Selector". In that case,
	// only Consumers in Namespaces matching this Selector will be selected by the
	// database instance. This field is ignored for other values of "From".
	//
	// +optional
	Selector *metav1.LabelSelector `json:"selector,omitempty"`
}

ConsumerNamespaces indicate which namespaces Consumers should be selected from.

func (*ConsumerNamespaces) DeepCopy added in v0.25.0

func (in *ConsumerNamespaces) DeepCopy() *ConsumerNamespaces

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsumerNamespaces.

func (*ConsumerNamespaces) DeepCopyInto added in v0.25.0

func (in *ConsumerNamespaces) DeepCopyInto(out *ConsumerNamespaces)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type CoordinatorSpec added in v0.21.0

// CoordinatorSpec defines attributes of the coordinator container.
type CoordinatorSpec struct {
	// Compute Resources required by coordinator container.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
	// +optional
	Resources core.ResourceRequirements `json:"resources,omitempty"`

	// Security options the coordinator container should run with.
	// More info: https://kubernetes.io/docs/concepts/policy/security-context/
	// More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
	// +optional
	SecurityContext *core.SecurityContext `json:"securityContext,omitempty"`
}

CoordinatorSpec defines attributes of the coordinator container

func (*CoordinatorSpec) DeepCopy added in v0.21.0

func (in *CoordinatorSpec) DeepCopy() *CoordinatorSpec

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoordinatorSpec.

func (*CoordinatorSpec) DeepCopyInto added in v0.21.0

func (in *CoordinatorSpec) DeepCopyInto(out *CoordinatorSpec)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type Database added in v0.46.0

// Database identifies a target database on a backend Postgres server,
// referenced through an AppBinding, that PgBouncer serves connections for.
type Database struct {
	// SyncUsers, when enabled, makes the operator fetch users of the backend
	// server from externally managed secrets into the PgBouncer server.
	// Updates to and deletion of those secrets are also synced to PgBouncer
	// while this is enabled.
	// +optional
	SyncUsers bool `json:"syncUsers,omitempty"`

	// DatabaseRef specifies the database appbinding reference in any namespace.
	DatabaseRef appcat.AppReference `json:"databaseRef"`

	// DatabaseName is the name of the target database inside a Postgres instance.
	DatabaseName string `json:"databaseName"`
}

func (*Database) DeepCopy added in v0.46.0

func (in *Database) DeepCopy() *Database

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Database.

func (*Database) DeepCopyInto added in v0.46.0

func (in *Database) DeepCopyInto(out *Database)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type DatabasePhase

// DatabasePhase represents the current lifecycle phase of a database instance.
type DatabasePhase string

+kubebuilder:validation:Enum=Provisioning;DataRestoring;Ready;Critical;NotReady;Halted;Unknown

const (
	// DatabasePhaseProvisioning is used for Databases that are currently provisioning.
	DatabasePhaseProvisioning DatabasePhase = "Provisioning"
	// DatabasePhaseDataRestoring is used for Databases for which data is currently restoring.
	DatabasePhaseDataRestoring DatabasePhase = "DataRestoring"
	// DatabasePhaseReady is used for Databases that are currently ReplicaReady, AcceptingConnection and Ready.
	DatabasePhaseReady DatabasePhase = "Ready"
	// DatabasePhaseCritical is used for Databases that can connect but have
	// ReplicaReady == false || Ready == false (e.g. Elasticsearch yellow status).
	DatabasePhaseCritical DatabasePhase = "Critical"
	// DatabasePhaseNotReady is used for Databases that can't connect.
	DatabasePhaseNotReady DatabasePhase = "NotReady"
	// DatabasePhaseHalted is used for Databases that are halted.
	DatabasePhaseHalted DatabasePhase = "Halted"
	// DatabasePhaseUnknown is used for Databases for which Phase can't be calculated.
	DatabasePhaseUnknown DatabasePhase = "Unknown"
)

type DeepStorageSpec added in v0.41.0

// DeepStorageSpec describes how Druid connects to its deep storage backend.
type DeepStorageSpec struct {
	// Type specifies the storage type to be used by druid.
	// Possible values: s3, google, azure, hdfs
	Type DruidDeepStorageType `json:"type"`

	// ConfigSecret should contain the necessary data
	// to connect to the deep storage.
	// +optional
	ConfigSecret *core.LocalObjectReference `json:"configSecret,omitempty"`
}

func (*DeepStorageSpec) DeepCopy added in v0.41.0

func (in *DeepStorageSpec) DeepCopy() *DeepStorageSpec

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeepStorageSpec.

func (*DeepStorageSpec) DeepCopyInto added in v0.41.0

func (in *DeepStorageSpec) DeepCopyInto(out *DeepStorageSpec)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type Druid added in v0.41.0

// Druid is the Kubernetes custom resource for an Apache Druid deployment,
// pairing the desired state (Spec) with the observed state (Status).
type Druid struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	// Spec defines the desired state of the Druid cluster.
	Spec   DruidSpec   `json:"spec,omitempty"`
	// Status holds the most recently observed state of the Druid cluster.
	Status DruidStatus `json:"status,omitempty"`
}

+kubebuilder:object:root=true +kubebuilder:subresource:status +kubebuilder:resource:shortName=dr,scope=Namespaced +kubebuilder:printcolumn:name="Type",type="string",JSONPath=".apiVersion" +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version" +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase" +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"

func (*Druid) AddDruidExtensionLoadList added in v0.41.0

func (d *Druid) AddDruidExtensionLoadList(druidExtensionLoadList string, extension string) string

func (*Druid) AppBindingMeta added in v0.41.0

func (d *Druid) AppBindingMeta() appcat.AppBindingMeta

func (*Druid) BrokersServiceName added in v0.41.0

func (d *Druid) BrokersServiceName() string

func (*Druid) ConfigSecretName added in v0.41.0

func (d *Druid) ConfigSecretName() string

func (*Druid) CoordinatorsServiceName added in v0.41.0

func (d *Druid) CoordinatorsServiceName() string

func (*Druid) CustomResourceDefinition added in v0.41.0

func (d *Druid) CustomResourceDefinition() *apiextensions.CustomResourceDefinition

func (*Druid) DeepCopy added in v0.41.0

func (in *Druid) DeepCopy() *Druid

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Druid.

func (*Druid) DeepCopyInto added in v0.41.0

func (in *Druid) DeepCopyInto(out *Druid)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

func (*Druid) DeepCopyObject added in v0.41.0

func (in *Druid) DeepCopyObject() runtime.Object

DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.

func (*Druid) Default added in v0.41.0

func (d *Druid) Default()

Default implements webhook.Defaulter so a webhook will be registered for the type

func (*Druid) DefaultUserCredSecretName added in v0.41.0

func (d *Druid) DefaultUserCredSecretName(username string) string

func (*Druid) DruidNodeContainerPort added in v0.41.0

func (d *Druid) DruidNodeContainerPort(nodeRole DruidNodeRoleType) int32

func (*Druid) DruidNodeRoleString added in v0.41.0

func (d *Druid) DruidNodeRoleString(nodeRole DruidNodeRoleType) string

func (*Druid) DruidNodeRoleStringSingular added in v0.41.0

func (d *Druid) DruidNodeRoleStringSingular(nodeRole DruidNodeRoleType) string

func (*Druid) Finalizer added in v0.44.0

func (r *Druid) Finalizer() string

func (*Druid) GetConnectionScheme added in v0.41.0

func (d *Druid) GetConnectionScheme() string

func (*Druid) GetDruidSegmentCacheConfig added in v0.41.0

func (d *Druid) GetDruidSegmentCacheConfig() string

func (*Druid) GetDruidStorageSize added in v0.41.0

func (d *Druid) GetDruidStorageSize(storageSize string) string

func (*Druid) GetMetadataStorageConnectURI added in v0.41.0

func (d *Druid) GetMetadataStorageConnectURI(appbinding *appcat.AppBinding, metadataStorageType DruidMetadataStorageType) string

func (*Druid) GetMetadataStorageType added in v0.41.0

func (d *Druid) GetMetadataStorageType(metadataStorage string) DruidMetadataStorageType

func (*Druid) GetNodeSpec added in v0.46.0

func (d *Druid) GetNodeSpec(nodeType DruidNodeRoleType) (*DruidNode, *DruidDataNode)

func (*Druid) GetPersistentSecrets added in v0.41.0

func (d *Druid) GetPersistentSecrets() []string

func (*Druid) GetZKServiceHost added in v0.41.0

func (d *Druid) GetZKServiceHost(appbinding *appcat.AppBinding) string

func (*Druid) GoverningServiceName added in v0.41.0

func (d *Druid) GoverningServiceName() string

func (*Druid) OffShootLabels added in v0.41.0

func (d *Druid) OffShootLabels() map[string]string

func (*Druid) OffShootName added in v0.41.0

func (d *Druid) OffShootName() string

func (*Druid) OffShootSelectors added in v0.41.0

func (d *Druid) OffShootSelectors(extraSelectors ...map[string]string) map[string]string

func (Druid) OffshootLabels added in v0.41.0

func (d Druid) OffshootLabels() map[string]string

func (*Druid) OffshootSelectors added in v0.41.0

func (d *Druid) OffshootSelectors(extraSelectors ...map[string]string) map[string]string

func (*Druid) OverlordsServiceName added in v0.41.0

func (d *Druid) OverlordsServiceName() string

func (*Druid) Owner added in v0.41.0

func (d *Druid) Owner() *meta.OwnerReference

func (*Druid) PVCName added in v0.41.0

func (d *Druid) PVCName(alias string) string

func (*Druid) PetSetName added in v0.44.0

func (d *Druid) PetSetName(nodeRole DruidNodeRoleType) string

func (*Druid) PodControllerLabels added in v0.41.0

func (d *Druid) PodControllerLabels(nodeType DruidNodeRoleType, extraLabels ...map[string]string) map[string]string

func (*Druid) PodLabels added in v0.41.0

func (d *Druid) PodLabels(nodeType DruidNodeRoleType, extraLabels ...map[string]string) map[string]string

func (*Druid) ReplicasAreReady added in v0.41.0

func (d *Druid) ReplicasAreReady(lister pslister.PetSetLister) (bool, string, error)

func (*Druid) ResourceFQN added in v0.41.0

func (d *Druid) ResourceFQN() string

func (*Druid) ResourceKind added in v0.41.0

func (d *Druid) ResourceKind() string

func (*Druid) ResourcePlural added in v0.41.0

func (d *Druid) ResourcePlural() string

func (*Druid) ResourceSingular added in v0.41.0

func (d *Druid) ResourceSingular() string

func (*Druid) RoutersServiceName added in v0.41.0

func (d *Druid) RoutersServiceName() string

func (*Druid) ServiceAccountName added in v0.41.0

func (d *Druid) ServiceAccountName() string

func (*Druid) ServiceLabels added in v0.41.0

func (d *Druid) ServiceLabels(alias ServiceAlias, extraLabels ...map[string]string) map[string]string

func (*Druid) ServiceName added in v0.41.0

func (d *Druid) ServiceName() string

func (*Druid) SetDefaults added in v0.41.0

func (d *Druid) SetDefaults()

func (*Druid) SetHealthCheckerDefaults added in v0.41.0

func (d *Druid) SetHealthCheckerDefaults()

func (*Druid) StatsService added in v0.44.0

func (d *Druid) StatsService() mona.StatsAccessor

func (*Druid) StatsServiceLabels added in v0.44.0

func (d *Druid) StatsServiceLabels() map[string]string

func (*Druid) ValidateCreate added in v0.41.0

func (d *Druid) ValidateCreate() (admission.Warnings, error)

ValidateCreate implements webhook.Validator so a webhook will be registered for the type

func (*Druid) ValidateDelete added in v0.41.0

func (d *Druid) ValidateDelete() (admission.Warnings, error)

ValidateDelete implements webhook.Validator so a webhook will be registered for the type

func (*Druid) ValidateUpdate added in v0.41.0

func (d *Druid) ValidateUpdate(old runtime.Object) (admission.Warnings, error)

ValidateUpdate implements webhook.Validator so a webhook will be registered for the type

type DruidApp added in v0.41.0

// DruidApp wraps a Druid resource by embedding it; presumably it exists to
// satisfy an appcat application interface (it carries Name/Type methods) —
// TODO confirm against the consuming code.
type DruidApp struct {
	*Druid
}

func (*DruidApp) DeepCopy added in v0.41.0

func (in *DruidApp) DeepCopy() *DruidApp

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DruidApp.

func (*DruidApp) DeepCopyInto added in v0.41.0

func (in *DruidApp) DeepCopyInto(out *DruidApp)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

func (DruidApp) Name added in v0.41.0

func (d DruidApp) Name() string

func (DruidApp) Type added in v0.41.0

func (d DruidApp) Type() appcat.AppType

type DruidClusterTopology added in v0.41.0

// DruidClusterTopology groups the per-role node specifications of a Druid
// cluster. Query/master roles use DruidNode; data roles (middleManagers,
// historicals) use DruidDataNode, which adds storage configuration.
type DruidClusterTopology struct {
	// Coordinators describes the coordinator nodes.
	Coordinators *DruidNode `json:"coordinators,omitempty"`
	// Overlords describes the overlord nodes.
	// +optional
	Overlords *DruidNode `json:"overlords,omitempty"`

	// MiddleManagers describes the middleManager data nodes.
	MiddleManagers *DruidDataNode `json:"middleManagers,omitempty"`

	// Historicals describes the historical data nodes.
	Historicals *DruidDataNode `json:"historicals,omitempty"`

	// Brokers describes the broker nodes.
	Brokers *DruidNode `json:"brokers,omitempty"`
	// Routers describes the router nodes.
	// +optional
	Routers *DruidNode `json:"routers,omitempty"`

	// NOTE(review): only Overlords and Routers carry the +optional marker even
	// though every field is a pointer with omitempty — confirm whether the
	// other roles are intentionally required in the generated CRD schema.
}

func (*DruidClusterTopology) DeepCopy added in v0.41.0

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DruidClusterTopology.

func (*DruidClusterTopology) DeepCopyInto added in v0.41.0

func (in *DruidClusterTopology) DeepCopyInto(out *DruidClusterTopology)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type DruidDataNode added in v0.46.0

// DruidDataNode describes a data-bearing Druid node role. It extends
// DruidNode with storage configuration (durable or ephemeral).
type DruidDataNode struct {
	// DruidDataNode has all the characteristics of DruidNode
	DruidNode `json:",inline"`

	// StorageType specifies if the storage
	// of this node is durable (default) or ephemeral.
	StorageType StorageType `json:"storageType,omitempty"`

	// Storage to specify how storage shall be used.
	// Applies when StorageType is durable.
	Storage *core.PersistentVolumeClaimSpec `json:"storage,omitempty"`

	// EphemeralStorage spec to specify the configuration of ephemeral storage type.
	EphemeralStorage *core.EmptyDirVolumeSource `json:"ephemeralStorage,omitempty"`

	// NOTE(review): Storage and EphemeralStorage lack +optional markers despite
	// omitempty tags — confirm intended CRD requiredness.
}

func (*DruidDataNode) DeepCopy added in v0.46.0

func (in *DruidDataNode) DeepCopy() *DruidDataNode

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DruidDataNode.

func (*DruidDataNode) DeepCopyInto added in v0.46.0

func (in *DruidDataNode) DeepCopyInto(out *DruidDataNode)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type DruidDeepStorageType added in v0.41.0

// DruidDeepStorageType enumerates the supported deep storage backends for Druid.
type DruidDeepStorageType string

+kubebuilder:validation:Enum=s3;google;azure;hdfs

// Supported deep storage backends.
const (
	DruidDeepStorageS3     DruidDeepStorageType = "s3"
	DruidDeepStorageGoogle DruidDeepStorageType = "google"
	DruidDeepStorageAzure  DruidDeepStorageType = "azure"
	DruidDeepStorageHDFS   DruidDeepStorageType = "hdfs"
)

type DruidList added in v0.41.0

// DruidList contains a list of Druid resources.
type DruidList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []Druid `json:"items"`
}

DruidList contains a list of Druid

func (*DruidList) DeepCopy added in v0.41.0

func (in *DruidList) DeepCopy() *DruidList

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DruidList.

func (*DruidList) DeepCopyInto added in v0.41.0

func (in *DruidList) DeepCopyInto(out *DruidList)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

func (*DruidList) DeepCopyObject added in v0.41.0

func (in *DruidList) DeepCopyObject() runtime.Object

DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.

type DruidMetadataStorageType added in v0.41.0

// DruidMetadataStorageType enumerates the supported metadata storage databases for Druid.
type DruidMetadataStorageType string

+kubebuilder:validation:Enum=MySQL;PostgreSQL

// Supported metadata storage databases.
const (
	DruidMetadataStorageMySQL      DruidMetadataStorageType = "MySQL"
	DruidMetadataStoragePostgreSQL DruidMetadataStorageType = "PostgreSQL"
)

type DruidNode added in v0.41.0

// DruidNode specifies the replica count, name suffix, and pod template
// shared by every Druid node role.
type DruidNode struct {
	// Replicas represents number of replicas for the specific type of node
	// +kubebuilder:default=1
	// +optional
	Replicas *int32 `json:"replicas,omitempty"`

	// Suffix to append with node name
	// +optional
	Suffix string `json:"suffix,omitempty"`

	// PodTemplate is an optional configuration for pods used to expose database
	// +optional
	PodTemplate ofst.PodTemplateSpec `json:"podTemplate,omitempty"`
}

func (*DruidNode) DeepCopy added in v0.41.0

func (in *DruidNode) DeepCopy() *DruidNode

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DruidNode.

func (*DruidNode) DeepCopyInto added in v0.41.0

func (in *DruidNode) DeepCopyInto(out *DruidNode)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type DruidNodeRoleType added in v0.41.0

// DruidNodeRoleType enumerates the node roles in a Druid cluster topology.
type DruidNodeRoleType string

+kubebuilder:validation:Enum=coordinators;overlords;brokers;routers;middleManagers;historicals

// Druid node roles; values match the JSON field names in DruidClusterTopology.
const (
	DruidNodeRoleCoordinators   DruidNodeRoleType = "coordinators"
	DruidNodeRoleOverlords      DruidNodeRoleType = "overlords"
	DruidNodeRoleBrokers        DruidNodeRoleType = "brokers"
	DruidNodeRoleRouters        DruidNodeRoleType = "routers"
	DruidNodeRoleMiddleManagers DruidNodeRoleType = "middleManagers"
	DruidNodeRoleHistoricals    DruidNodeRoleType = "historicals"
)

type DruidPhase added in v0.41.0

// DruidPhase represents the current lifecycle phase of a Druid cluster.
type DruidPhase string

+kubebuilder:validation:Enum=Provisioning;Ready;NotReady;Critical

// Druid lifecycle phases; a subset of the generic DatabasePhase values.
const (
	DruidPhaseProvisioning DruidPhase = "Provisioning"
	DruidPhaseReady        DruidPhase = "Ready"
	DruidPhaseNotReady     DruidPhase = "NotReady"
	DruidPhaseCritical     DruidPhase = "Critical"
)

type DruidSpec added in v0.41.0

type DruidSpec struct {
	// Version of Druid to be deployed.
	Version string `json:"version"`

	// Druid topology for node specification
	// +optional
	Topology *DruidClusterTopology `json:"topology,omitempty"`

	// disable security. It disables authentication security of user.
	// If unset, default is false
	// +optional
	DisableSecurity *bool `json:"disableSecurity,omitempty"`

	// Database authentication secret
	// +optional
	AuthSecret *core.LocalObjectReference `json:"authSecret,omitempty"`

	// ConfigSecret is an optional field to provide custom configuration file for database (i.e. config.properties).
	// If specified, this file will be used as configuration file otherwise default configuration file will be used.
	// +optional
	ConfigSecret *core.LocalObjectReference `json:"configSecret,omitempty"`

	// MetadataStorage contains information for Druid to connect to external dependency metadata storage
	MetadataStorage *MetadataStorage `json:"metadataStorage"`

	// DeepStorage contains specification for druid to connect to the deep storage
	DeepStorage *DeepStorageSpec `json:"deepStorage"`

	// ZooKeeper contains information for Druid to connect to external dependency metadata storage
	// +optional
	ZookeeperRef *ZookeeperRef `json:"zookeeperRef,omitempty"`

	// ServiceTemplates is an optional configuration for services used to expose database
	// +optional
	ServiceTemplates []NamedServiceTemplateSpec `json:"serviceTemplates,omitempty"`

	// Indicates that the database is halted and all offshoot Kubernetes resources except PVCs are deleted.
	// +optional
	Halted bool</