diff --git a/CHANGELOG.md b/CHANGELOG.md index 834b5030..43a69646 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ ### Added +- Added listener support for HBase ([#639]). - Adds new telemetry CLI arguments and environment variables ([#652]). - Use `--file-log-max-files` (or `FILE_LOG_MAX_FILES`) to limit the number of log files kept. - Use `--file-log-rotation-period` (or `FILE_LOG_ROTATION_PERIOD`) to configure the frequency of rotation. @@ -38,6 +39,7 @@ - test: Remove HDFS `3.3.4`, `3.3.6`, and `3.4.0` ([#655]). - test: HBase 2.4.18 removed ([#659]): +[#639]: https://github.com/stackabletech/hbase-operator/pull/639 [#640]: https://github.com/stackabletech/hbase-operator/pull/640 [#645]: https://github.com/stackabletech/hbase-operator/pull/645 [#647]: https://github.com/stackabletech/hbase-operator/pull/647 diff --git a/deploy/helm/hbase-operator/crds/crds.yaml b/deploy/helm/hbase-operator/crds/crds.yaml index 6294fe40..eee702ec 100644 --- a/deploy/helm/hbase-operator/crds/crds.yaml +++ b/deploy/helm/hbase-operator/crds/crds.yaml @@ -73,20 +73,6 @@ spec: hdfsConfigMapName: description: Name of the [discovery ConfigMap](https://docs.stackable.tech/home/nightly/concepts/service_discovery) for an HDFS cluster. type: string - listenerClass: - default: cluster-internal - description: |- - This field controls which type of Service the Operator creates for this HbaseCluster: - - * cluster-internal: Use a ClusterIP service - - * external-unstable: Use a NodePort service - - This is a temporary solution with the goal to keep yaml manifests forward compatible. In the future, this setting will control which [ListenerClass](https://docs.stackable.tech/home/nightly/listener-operator/listenerclass.html) will be used to expose the service, and ListenerClass names will stay the same, allowing for a non-breaking change. 
- enum: - - cluster-internal - - external-unstable - type: string vectorAggregatorConfigMapName: description: Name of the Vector aggregator [discovery ConfigMap](https://docs.stackable.tech/home/nightly/concepts/service_discovery). It must contain the key `ADDRESS` with the address of the Vector aggregator. Follow the [logging tutorial](https://docs.stackable.tech/home/nightly/tutorials/logging-vector-aggregator) to learn how to configure log aggregation with Vector. nullable: true @@ -210,6 +196,10 @@ spec: hbaseRootdir: nullable: true type: string + listenerClass: + description: This field controls which [ListenerClass](https://docs.stackable.tech/home/nightly/listener-operator/listenerclass.html) is used to expose this rolegroup. + nullable: true + type: string logging: default: containers: {} @@ -460,6 +450,10 @@ spec: hbaseRootdir: nullable: true type: string + listenerClass: + description: This field controls which [ListenerClass](https://docs.stackable.tech/home/nightly/listener-operator/listenerclass.html) is used to expose this rolegroup. + nullable: true + type: string logging: default: containers: {} @@ -691,6 +685,10 @@ spec: hbaseRootdir: nullable: true type: string + listenerClass: + description: This field controls which [ListenerClass](https://docs.stackable.tech/home/nightly/listener-operator/listenerclass.html) is used to expose this rolegroup. + nullable: true + type: string logging: default: containers: {} @@ -969,6 +967,10 @@ spec: hbaseRootdir: nullable: true type: string + listenerClass: + description: This field controls which [ListenerClass](https://docs.stackable.tech/home/nightly/listener-operator/listenerclass.html) is used to expose this rolegroup. 
+ nullable: true + type: string logging: default: containers: {} @@ -1228,6 +1230,10 @@ spec: hbaseRootdir: nullable: true type: string + listenerClass: + description: This field controls which [ListenerClass](https://docs.stackable.tech/home/nightly/listener-operator/listenerclass.html) is used to expose this rolegroup. + nullable: true + type: string logging: default: containers: {} @@ -1478,6 +1484,10 @@ spec: hbaseRootdir: nullable: true type: string + listenerClass: + description: This field controls which [ListenerClass](https://docs.stackable.tech/home/nightly/listener-operator/listenerclass.html) is used to expose this rolegroup. + nullable: true + type: string logging: default: containers: {} diff --git a/deploy/helm/hbase-operator/templates/roles.yaml b/deploy/helm/hbase-operator/templates/roles.yaml index f13d450b..7bc43bdb 100644 --- a/deploy/helm/hbase-operator/templates/roles.yaml +++ b/deploy/helm/hbase-operator/templates/roles.yaml @@ -77,6 +77,12 @@ rules: verbs: - create - patch + - apiGroups: + - listeners.stackable.tech + resources: + - listeners + verbs: + - get - apiGroups: - {{ include "operator.name" . }}.stackable.tech resources: diff --git a/docs/modules/hbase/pages/usage-guide/listenerclass.adoc b/docs/modules/hbase/pages/usage-guide/listenerclass.adoc index 1f6d48b8..4c19e5e7 100644 --- a/docs/modules/hbase/pages/usage-guide/listenerclass.adoc +++ b/docs/modules/hbase/pages/usage-guide/listenerclass.adoc @@ -1,18 +1,37 @@ = Service exposition with ListenerClasses +:description: Configure HBase service exposure using ListenerClasses to control internal and external access for all roles. -Apache HBase offers an API. -The operator deploys a service called `` (where `` is the name of the HbaseCluster) through which HBase can be reached. +The operator deploys a xref:listener-operator:listener.adoc[Listener] for each Master, Regionserver and Restserver pod. 
+They all default to only being accessible from within the Kubernetes cluster, but this can be changed by setting `.spec.{masters,regionServers,restServers}.config.listenerClass`: -This service can have either the `cluster-internal` or `external-unstable` type. -`external-stable` is not supported for HBase at the moment. -Read more about the types in the xref:concepts:service-exposition.adoc[service exposition] documentation at platform level. +[source,yaml] +---- +spec: + masters: + config: + listenerClass: external-unstable # <1> + regionServers: + config: + listenerClass: external-unstable + restServers: + config: + listenerClass: external-unstable +---- +<1> Specify one of `external-stable`, `external-unstable`, `cluster-internal` (the default setting is `cluster-internal`). +This can be set separately for all three roles. -This is how the listener class is configured: +Externally-reachable endpoints (i.e. where listener-class = `external-stable` or `external-unstable`) are written to a ConfigMap called `<cluster-name>-ui-endpoints`, listing each rolegroup by replica: [source,yaml] ---- -spec: - clusterConfig: - listenerClass: cluster-internal # <1> +apiVersion: v1 +data: + hbase.master-0.ui: 172.19.0.3:32353 + hbase.master-1.ui: 172.19.0.5:31817 + hbase.regionserver-0.ui: 172.19.0.3:31719 + hbase.regionserver-1.ui: 172.19.0.5:30626 + hbase.restserver-0.ui: 172.19.0.3:31790 + hbase.restserver-1.ui: 172.19.0.5:32292 +kind: ConfigMap +... ---- -<1> The default `cluster-internal` setting.
diff --git a/rust/operator-binary/src/crd/mod.rs b/rust/operator-binary/src/crd/mod.rs index dd878acf..f99c0e83 100644 --- a/rust/operator-binary/src/crd/mod.rs +++ b/rust/operator-binary/src/crd/mod.rs @@ -59,8 +59,6 @@ pub const SSL_CLIENT_XML: &str = "ssl-client.xml"; pub const HBASE_CLUSTER_DISTRIBUTED: &str = "hbase.cluster.distributed"; pub const HBASE_ROOTDIR: &str = "hbase.rootdir"; -pub const HBASE_UNSAFE_REGIONSERVER_HOSTNAME_DISABLE_MASTER_REVERSEDNS: &str = - "hbase.unsafe.regionserver.hostname.disable.master.reversedns"; pub const HBASE_UI_PORT_NAME_HTTP: &str = "ui-http"; pub const HBASE_UI_PORT_NAME_HTTPS: &str = "ui-https"; @@ -80,6 +78,8 @@ pub const HBASE_REST_UI_PORT: u16 = 8085; // This port is only used by Hbase prior to version 2.6 with a third-party JMX exporter. // Newer versions use the same port as the UI because Hbase provides it's own metrics API pub const METRICS_PORT: u16 = 9100; +pub const LISTENER_VOLUME_NAME: &str = "listener"; +pub const LISTENER_VOLUME_DIR: &str = "/stackable/listener"; const DEFAULT_REGION_MOVER_TIMEOUT: Duration = Duration::from_minutes_unchecked(59); const DEFAULT_REGION_MOVER_DELTA_TO_SHUTDOWN: Duration = Duration::from_minutes_unchecked(1); @@ -106,6 +106,9 @@ pub enum Error { #[snafu(display("incompatible merge types"))] IncompatibleMergeTypes, + + #[snafu(display("role-group is not valid"))] + NoRoleGroup, } #[versioned(version(name = "v1alpha1"))] @@ -175,18 +178,6 @@ pub mod versioned { /// for a ZooKeeper cluster. pub zookeeper_config_map_name: String, - /// This field controls which type of Service the Operator creates for this HbaseCluster: - /// - /// * cluster-internal: Use a ClusterIP service - /// - /// * external-unstable: Use a NodePort service - /// - /// This is a temporary solution with the goal to keep yaml manifests forward compatible. 
- /// In the future, this setting will control which [ListenerClass](DOCS_BASE_URL_PLACEHOLDER/listener-operator/listenerclass.html) - /// will be used to expose the service, and ListenerClass names will stay the same, allowing for a non-breaking change. - #[serde(default)] - pub listener_class: CurrentlySupportedListenerClasses, - /// Settings related to user [authentication](DOCS_BASE_URL_PLACEHOLDER/usage-guide/security). pub authentication: Option, @@ -216,6 +207,11 @@ impl v1alpha1::HbaseCluster { let defaults = AnyConfigFragment::default_for(role, &self.name_any(), hdfs_discovery_cm_name); + // Trivial values for role-groups are not allowed + if role_group.is_empty() { + return Err(Error::NoRoleGroup); + } + let (mut role_config, mut role_group_config) = match role { HbaseRole::RegionServer => { let role = self @@ -231,7 +227,9 @@ impl v1alpha1::HbaseCluster { .role_groups .get(role_group) .map(|rg| rg.config.config.clone()) - .unwrap_or_default(); + .expect( + "Cannot be empty as trivial values of role-group have already been checked", + ); ( AnyConfigFragment::RegionServer(role_config), @@ -253,7 +251,9 @@ impl v1alpha1::HbaseCluster { .role_groups .get(role_group) .map(|rg| rg.config.config.clone()) - .unwrap_or_default(); + .expect( + "Cannot be empty as trivial values of role-group have already been checked", + ); // Retrieve role resource config ( @@ -273,7 +273,9 @@ impl v1alpha1::HbaseCluster { .role_groups .get(role_group) .map(|rg| rg.config.config.clone()) - .unwrap_or_default(); + .expect( + "Cannot be empty as trivial values of role-group have already been checked", + ); // Retrieve role resource config ( @@ -539,7 +541,7 @@ impl v1alpha1::HbaseCluster { } /// Name of the port used by the Web UI, which depends on HTTPS usage - fn ui_port_name(&self) -> String { + pub fn ui_port_name(&self) -> String { if self.has_https_enabled() { HBASE_UI_PORT_NAME_HTTPS } else { @@ -565,27 +567,6 @@ pub fn merged_env(rolegroup_config: Option<&BTreeMap>) -> Vec 
String { - match self { - CurrentlySupportedListenerClasses::ClusterInternal => "ClusterIP".to_string(), - CurrentlySupportedListenerClasses::ExternalUnstable => "NodePort".to_string(), - } - } -} - #[derive(Clone, Debug, Deserialize, Eq, Hash, JsonSchema, PartialEq, Serialize)] #[serde(rename_all = "camelCase")] pub struct KerberosConfig { @@ -709,6 +690,7 @@ impl HbaseRole { affinity: get_affinity(cluster_name, self, hdfs_discovery_cm_name), graceful_shutdown_timeout: Some(graceful_shutdown_timeout), requested_secret_lifetime: Some(requested_secret_lifetime), + listener_class: Some("cluster-internal".to_string()), } } @@ -809,6 +791,7 @@ impl AnyConfigFragment { cli_opts: None, }, requested_secret_lifetime: Some(HbaseRole::DEFAULT_REGION_SECRET_LIFETIME), + listener_class: Some("cluster-internal".to_string()), }) } HbaseRole::RestServer => AnyConfigFragment::RestServer(HbaseConfigFragment { @@ -820,6 +803,7 @@ impl AnyConfigFragment { HbaseRole::DEFAULT_REST_SERVER_GRACEFUL_SHUTDOWN_TIMEOUT, ), requested_secret_lifetime: Some(HbaseRole::DEFAULT_REST_SECRET_LIFETIME), + listener_class: Some("cluster-internal".to_string()), }), HbaseRole::Master => AnyConfigFragment::Master(HbaseConfigFragment { hbase_rootdir: None, @@ -830,6 +814,7 @@ impl AnyConfigFragment { HbaseRole::DEFAULT_MASTER_GRACEFUL_SHUTDOWN_TIMEOUT, ), requested_secret_lifetime: Some(HbaseRole::DEFAULT_MASTER_SECRET_LIFETIME), + listener_class: Some("cluster-internal".to_string()), }), } } @@ -907,6 +892,9 @@ pub struct HbaseConfig { /// Please note that this can be shortened by the `maxCertificateLifetime` setting on the SecretClass issuing the TLS certificate. #[fragment_attrs(serde(default))] pub requested_secret_lifetime: Option, + + /// This field controls which [ListenerClass](DOCS_BASE_URL_PLACEHOLDER/listener-operator/listenerclass.html) is used to expose this rolegroup. 
+ pub listener_class: String, } impl Configuration for HbaseConfigFragment { @@ -965,10 +953,6 @@ impl Configuration for HbaseConfigFragment { HBASE_CLUSTER_DISTRIBUTED.to_string(), Some("true".to_string()), ); - result.insert( - HBASE_UNSAFE_REGIONSERVER_HOSTNAME_DISABLE_MASTER_REVERSEDNS.to_string(), - Some("true".to_string()), - ); result.insert(HBASE_ROOTDIR.to_string(), self.hbase_rootdir.clone()); } _ => {} @@ -1060,6 +1044,9 @@ pub struct RegionServerConfig { /// The operator will compute a timeout period for the region move that will not exceed the graceful shutdown timeout. #[fragment_attrs(serde(default))] pub region_mover: RegionMover, + + /// This field controls which [ListenerClass](DOCS_BASE_URL_PLACEHOLDER/listener-operator/listenerclass.html) is used to expose this rolegroup. + pub listener_class: String, } impl Configuration for RegionServerConfigFragment { @@ -1116,10 +1103,6 @@ impl Configuration for RegionServerConfigFragment { HBASE_CLUSTER_DISTRIBUTED.to_string(), Some("true".to_string()), ); - result.insert( - HBASE_UNSAFE_REGIONSERVER_HOSTNAME_DISABLE_MASTER_REVERSEDNS.to_string(), - Some("true".to_string()), - ); result.insert(HBASE_ROOTDIR.to_string(), self.hbase_rootdir.clone()); } _ => {} @@ -1185,6 +1168,14 @@ impl AnyServiceConfig { } } + pub fn listener_class(&self) -> String { + match self { + AnyServiceConfig::Master(config) => config.listener_class.clone(), + AnyServiceConfig::RegionServer(config) => config.listener_class.clone(), + AnyServiceConfig::RestServer(config) => config.listener_class.clone(), + } + } + /// Returns command line arguments to pass on to the region mover tool. /// The following arguments are excluded because they are already part of the /// hbase-entrypoint.sh script. 
diff --git a/rust/operator-binary/src/hbase_controller.rs b/rust/operator-binary/src/hbase_controller.rs index 91554814..74fcbc7f 100644 --- a/rust/operator-binary/src/hbase_controller.rs +++ b/rust/operator-binary/src/hbase_controller.rs @@ -8,6 +8,7 @@ use std::{ }; use const_format::concatcp; +use indoc::formatdoc; use product_config::{ ProductConfigManager, types::PropertyNameKind, @@ -20,8 +21,14 @@ use stackable_operator::{ configmap::ConfigMapBuilder, meta::ObjectMetaBuilder, pod::{ - PodBuilder, container::ContainerBuilder, resources::ResourceRequirementsBuilder, + PodBuilder, + container::ContainerBuilder, + resources::ResourceRequirementsBuilder, security::PodSecurityContextBuilder, + volume::{ + ListenerOperatorVolumeSourceBuilder, ListenerOperatorVolumeSourceBuilderError, + ListenerReference, + }, }, }, cluster_resources::{ClusterResourceApplyStrategy, ClusterResources}, @@ -70,9 +77,11 @@ use crate::{ construct_role_specific_non_heap_jvm_args, }, crd::{ - APP_NAME, AnyServiceConfig, Container, HBASE_ENV_SH, HBASE_REST_PORT_NAME_HTTP, - HBASE_REST_PORT_NAME_HTTPS, HBASE_SITE_XML, HbaseClusterStatus, HbaseRole, - JVM_SECURITY_PROPERTIES_FILE, SSL_CLIENT_XML, SSL_SERVER_XML, merged_env, v1alpha1, + APP_NAME, AnyServiceConfig, Container, HBASE_ENV_SH, HBASE_MASTER_PORT, + HBASE_REGIONSERVER_PORT, HBASE_REST_PORT_NAME_HTTP, HBASE_REST_PORT_NAME_HTTPS, + HBASE_SITE_XML, HbaseClusterStatus, HbaseRole, JVM_SECURITY_PROPERTIES_FILE, + LISTENER_VOLUME_DIR, LISTENER_VOLUME_NAME, SSL_CLIENT_XML, SSL_SERVER_XML, merged_env, + v1alpha1, }, discovery::build_discovery_configmap, kerberos::{ @@ -312,6 +321,16 @@ pub enum Error { #[snafu(display("failed to construct JVM arguments"))] ConstructJvmArgument { source: crate::config::jvm::Error }, + + #[snafu(display("failed to build Labels"))] + LabelBuild { + source: stackable_operator::kvp::LabelError, + }, + + #[snafu(display("failed to build listener volume"))] + BuildListenerVolume { + source: 
ListenerOperatorVolumeSourceBuilderError, + }, } type Result = std::result::Result; @@ -375,26 +394,6 @@ pub async fn reconcile_hbase( ) .context(CreateClusterResourcesSnafu)?; - let region_server_role_service = - build_region_server_role_service(hbase, &resolved_product_image)?; - cluster_resources - .add(client, region_server_role_service) - .await - .context(ApplyRoleServiceSnafu)?; - - // discovery config map - let discovery_cm = build_discovery_configmap( - hbase, - &client.kubernetes_cluster_info, - &zookeeper_connection_information, - &resolved_product_image, - ) - .context(BuildDiscoveryConfigMapSnafu)?; - cluster_resources - .add(client, discovery_cm) - .await - .context(ApplyDiscoveryConfigMapSnafu)?; - let (rbac_sa, rbac_rolebinding) = build_rbac_resources( hbase, APP_NAME, @@ -443,7 +442,6 @@ pub async fn reconcile_hbase( )?; let rg_statefulset = build_rolegroup_statefulset( hbase, - &client.kubernetes_cluster_info, &hbase_role, &rolegroup, rolegroup_config, @@ -484,6 +482,20 @@ pub async fn reconcile_hbase( } } + // Discovery CM will fail to build until the rest of the cluster has been deployed, so do it last + // so that failure won't inhibit the rest of the cluster from booting up. + let discovery_cm = build_discovery_configmap( + hbase, + &client.kubernetes_cluster_info, + &zookeeper_connection_information, + &resolved_product_image, + ) + .context(BuildDiscoveryConfigMapSnafu)?; + cluster_resources + .add(client, discovery_cm) + .await + .context(ApplyDiscoveryConfigMapSnafu)?; + let cluster_operation_cond_builder = ClusterOperationsConditionBuilder::new(&hbase.spec.cluster_operation); @@ -503,59 +515,6 @@ pub async fn reconcile_hbase( Ok(Action::await_change()) } -/// The server-role service is the primary endpoint that should be used by clients that do not perform internal load balancing, -/// including targets outside of the cluster. 
-pub fn build_region_server_role_service( - hbase: &v1alpha1::HbaseCluster, - resolved_product_image: &ResolvedProductImage, -) -> Result { - let role = HbaseRole::RegionServer; - let role_name = role.to_string(); - let role_svc_name = hbase - .server_role_service_name() - .context(GlobalServiceNameNotFoundSnafu)?; - let ports = hbase - .ports(&role, &resolved_product_image.product_version) - .into_iter() - .map(|(name, value)| ServicePort { - name: Some(name), - port: i32::from(value), - protocol: Some("TCP".to_string()), - ..ServicePort::default() - }) - .collect(); - - let metadata = ObjectMetaBuilder::new() - .name_and_namespace(hbase) - .name(&role_svc_name) - .ownerreference_from_resource(hbase, None, Some(true)) - .context(ObjectMissingMetadataForOwnerRefSnafu)? - .with_recommended_labels(build_recommended_labels( - hbase, - &resolved_product_image.app_version_label, - &role_name, - "global", - )) - .context(ObjectMetaSnafu)? - .build(); - - let service_selector_labels = - Labels::role_selector(hbase, APP_NAME, &role_name).context(BuildLabelSnafu)?; - - let service_spec = ServiceSpec { - type_: Some(hbase.spec.cluster_config.listener_class.k8s_service_type()), - ports: Some(ports), - selector: Some(service_selector_labels.into()), - ..ServiceSpec::default() - }; - - Ok(Service { - metadata, - spec: Some(service_spec), - status: None, - }) -} - /// The rolegroup [`ConfigMap`] configures the rolegroup based on the configuration given by the administrator #[allow(clippy::too_many_arguments)] fn build_rolegroup_config_map( @@ -590,6 +549,63 @@ fn build_rolegroup_config_map( hbase_site_config .extend(hbase_opa_config.map_or(vec![], |config| config.hbase_site_config())); + // Get more useful stack traces... 
+ // The default Netty impl gives us netty garbage and nothing else + hbase_site_config.insert( + "hbase.rpc.client.impl".to_string(), + "org.apache.hadoop.hbase.ipc.BlockingRpcClient".to_string(), + ); + + // Set listener endpoint information with generic properties + hbase_site_config.insert( + "hbase.listener.endpoint".to_string(), + "${HBASE_LISTENER_ENDPOINT}".to_string(), + ); + hbase_site_config.insert( + "hbase.info.port".to_string(), + "${HBASE_INFO_PORT}".to_string(), + ); + + match hbase_role { + HbaseRole::Master => { + hbase_site_config.insert( + "hbase.master.ipc.address".to_string(), + "0.0.0.0".to_string(), + ); + hbase_site_config.insert( + "hbase.master.ipc.port".to_string(), + HBASE_MASTER_PORT.to_string(), + ); + hbase_site_config.insert( + "hbase.master.hostname".to_string(), + "${HBASE_SERVICE_HOST}".to_string(), + ); + hbase_site_config.insert( + "hbase.master.port".to_string(), + "${HBASE_SERVICE_PORT}".to_string(), + ); + } + HbaseRole::RegionServer => { + hbase_site_config.insert( + "hbase.regionserver.ipc.address".to_string(), + "0.0.0.0".to_string(), + ); + hbase_site_config.insert( + "hbase.regionserver.ipc.port".to_string(), + HBASE_REGIONSERVER_PORT.to_string(), + ); + hbase_site_config.insert( + "hbase.unsafe.regionserver.hostname".to_string(), + "${HBASE_SERVICE_HOST}".to_string(), + ); + hbase_site_config.insert( + "hbase.regionserver.port".to_string(), + "${HBASE_SERVICE_PORT}".to_string(), + ); + } + HbaseRole::RestServer => {} + }; + // configOverride come last hbase_site_config.extend(config.clone()); hbase_site_xml = to_hadoop_xml( @@ -732,7 +748,7 @@ fn build_rolegroup_service( let metadata = ObjectMetaBuilder::new() .name_and_namespace(hbase) - .name(rolegroup.object_name()) + .name(format!("{name}-metrics", name = rolegroup.object_name())) .ownerreference_from_resource(hbase, None, Some(true)) .context(ObjectMissingMetadataForOwnerRefSnafu)? 
.with_recommended_labels(build_recommended_labels( @@ -772,7 +788,6 @@ fn build_rolegroup_service( #[allow(clippy::too_many_arguments)] fn build_rolegroup_statefulset( hbase: &v1alpha1::HbaseCluster, - cluster_info: &KubernetesClusterInfo, hbase_role: &HbaseRole, rolegroup_ref: &RoleGroupRef, rolegroup_config: &HashMap>, @@ -867,15 +882,30 @@ fn build_rolegroup_statefulset( }, ]); + let role_name = hbase_role.cli_role_name(); let mut hbase_container = ContainerBuilder::new("hbase").expect("ContainerBuilder not created"); + + let rest_http_port_name = if hbase.has_https_enabled() { + HBASE_REST_PORT_NAME_HTTPS + } else { + HBASE_REST_PORT_NAME_HTTP + }; + hbase_container .image_from_product_image(resolved_product_image) - .command(vec!["/stackable/hbase/bin/hbase-entrypoint.sh".to_string()]) - .args(vec![ - hbase_role.cli_role_name(), - hbase_service_domain_name(hbase, rolegroup_ref, cluster_info)?, - hbase.service_port(hbase_role).to_string(), - ]) + .command(command()) + .args(vec![formatdoc! {" + {entrypoint} {role} {port} {port_name} {ui_port_name}", + entrypoint = "/stackable/hbase/bin/hbase-entrypoint.sh".to_string(), + role = role_name, + port = hbase.service_port(hbase_role).to_string(), + port_name = match hbase_role { + HbaseRole::Master => "master", + HbaseRole::RegionServer => "regionserver", + HbaseRole::RestServer => rest_http_port_name, + }, + ui_port_name = hbase.ui_port_name(), + }]) .add_env_vars(merged_env) // Needed for the `containerdebug` process to log it's tracing information to. .add_env_var( @@ -890,6 +920,8 @@ fn build_rolegroup_statefulset( .context(AddVolumeMountSnafu)? .add_volume_mount("log", STACKABLE_LOG_DIR) .context(AddVolumeMountSnafu)? + .add_volume_mount(LISTENER_VOLUME_NAME, LISTENER_VOLUME_DIR) + .context(AddVolumeMountSnafu)? 
.add_container_ports(ports) .resources(merged_config.resources().clone().into()) .startup_probe(startup_probe) @@ -898,13 +930,17 @@ fn build_rolegroup_statefulset( let mut pod_builder = PodBuilder::new(); + let recommended_object_labels = build_recommended_labels( + hbase, + hbase_version, + &rolegroup_ref.role, + &rolegroup_ref.role_group, + ); + let recommended_labels = + Labels::recommended(recommended_object_labels.clone()).context(LabelBuildSnafu)?; + let pb_metadata = ObjectMetaBuilder::new() - .with_recommended_labels(build_recommended_labels( - hbase, - hbase_version, - &rolegroup_ref.role, - &rolegroup_ref.role_group, - )) + .with_recommended_labels(recommended_object_labels) .context(ObjectMetaSnafu)? .build(); @@ -940,6 +976,15 @@ fn build_rolegroup_statefulset( .service_account_name(service_account.name_any()) .security_context(PodSecurityContextBuilder::new().fs_group(1000).build()); + // externally-reachable listener endpoints should use a pvc volume... + let pvc = ListenerOperatorVolumeSourceBuilder::new( + &ListenerReference::ListenerClass(merged_config.listener_class().to_string()), + &recommended_labels, + ) + .context(BuildListenerVolumeSnafu)? + .build_pvc(LISTENER_VOLUME_NAME.to_string()) + .context(BuildListenerVolumeSnafu)?; + if let Some(ContainerLogConfig { choice: Some(ContainerLogConfigChoice::Custom(CustomContainerLogConfig { @@ -1042,8 +1087,12 @@ fn build_rolegroup_statefulset( match_labels: Some(statefulset_match_labels.into()), ..LabelSelector::default() }, - service_name: Some(rolegroup_ref.object_name()), + service_name: Some(format!( + "{name}-metrics", + name = rolegroup_ref.object_name() + )), template: pod_template, + volume_claim_templates: Some(vec![pvc]), ..StatefulSetSpec::default() }; @@ -1054,6 +1103,17 @@ fn build_rolegroup_statefulset( }) } +/// Returns the container command. 
+fn command() -> Vec { + vec![ + "/bin/bash".to_string(), + "-x".to_string(), + "-euo".to_string(), + "pipefail".to_string(), + "-c".to_string(), + ] +} + fn write_hbase_env_sh<'a, T>(properties: T) -> String where T: Iterator, @@ -1116,6 +1176,7 @@ fn build_hbase_env_sh( let role_specific_non_heap_jvm_args = construct_role_specific_non_heap_jvm_args(hbase, hbase_role, role_group, product_version) .context(ConstructJvmArgumentSnafu)?; + match hbase_role { HbaseRole::Master => { result.insert( @@ -1158,28 +1219,6 @@ fn validate_cr(hbase: &v1alpha1::HbaseCluster) -> Result<()> { Ok(()) } -/// Build the domain name of an HBase service pod. -/// The hbase-entrypoint.sh script uses this to build the fully qualified name of a pod -/// by appending it to the `HOSTNAME` environment variable. -/// This name is required by the RegionMover to function properly. -fn hbase_service_domain_name( - hbase: &v1alpha1::HbaseCluster, - rolegroup_ref: &RoleGroupRef, - cluster_info: &KubernetesClusterInfo, -) -> Result { - let hbase_cluster_name = rolegroup_ref.object_name(); - let pod_namespace = hbase - .metadata - .namespace - .clone() - .context(ObjectHasNoNamespaceSnafu)?; - let cluster_domain = &cluster_info.cluster_domain; - - Ok(format!( - "{hbase_cluster_name}.{pod_namespace}.svc.{cluster_domain}" - )) -} - #[cfg(test)] mod test { use rstest::rstest; diff --git a/tests/templates/kuttl/cluster-operation/03-install-hbase.yaml.j2 b/tests/templates/kuttl/cluster-operation/03-install-hbase.yaml.j2 index 4732f740..6989c892 100644 --- a/tests/templates/kuttl/cluster-operation/03-install-hbase.yaml.j2 +++ b/tests/templates/kuttl/cluster-operation/03-install-hbase.yaml.j2 @@ -29,6 +29,7 @@ spec: gracefulShutdownTimeout: 1m logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + listenerClass: external-stable roleGroups: default: replicas: 1 @@ -37,6 +38,7 @@ spec: gracefulShutdownTimeout: 1m logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | 
length > 0 }} + listenerClass: external-stable roleGroups: default: replicas: 1 @@ -45,6 +47,7 @@ spec: gracefulShutdownTimeout: 1m logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + listenerClass: external-stable roleGroups: default: replicas: 1 diff --git a/tests/templates/kuttl/cluster-operation/10-pause-hbase.yaml.j2 b/tests/templates/kuttl/cluster-operation/10-pause-hbase.yaml.j2 index ffc8c7c0..0431cf88 100644 --- a/tests/templates/kuttl/cluster-operation/10-pause-hbase.yaml.j2 +++ b/tests/templates/kuttl/cluster-operation/10-pause-hbase.yaml.j2 @@ -32,6 +32,7 @@ spec: gracefulShutdownTimeout: 1m logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + listenerClass: external-stable roleGroups: default: replicas: 1 @@ -40,6 +41,7 @@ spec: gracefulShutdownTimeout: 1m logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + listenerClass: external-stable roleGroups: default: replicas: 1 @@ -48,6 +50,7 @@ spec: gracefulShutdownTimeout: 1m logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + listenerClass: external-stable roleGroups: default: replicas: 2 # ignored because reconciliation is paused diff --git a/tests/templates/kuttl/cluster-operation/20-stop-hbase.yaml.j2 b/tests/templates/kuttl/cluster-operation/20-stop-hbase.yaml.j2 index 0f4c5665..8bc4007f 100644 --- a/tests/templates/kuttl/cluster-operation/20-stop-hbase.yaml.j2 +++ b/tests/templates/kuttl/cluster-operation/20-stop-hbase.yaml.j2 @@ -32,6 +32,7 @@ spec: gracefulShutdownTimeout: 1m logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + listenerClass: external-stable roleGroups: default: replicas: 1 @@ -40,6 +41,7 @@ spec: gracefulShutdownTimeout: 1m logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + listenerClass: external-stable roleGroups: default: replicas: 1 @@ -48,6 +50,7 @@ spec: gracefulShutdownTimeout: 1m 
logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + listenerClass: external-stable roleGroups: default: replicas: 1 # set to 0 by the operator because cluster is stopped diff --git a/tests/templates/kuttl/cluster-operation/30-restart-hbase.yaml.j2 b/tests/templates/kuttl/cluster-operation/30-restart-hbase.yaml.j2 index 388110b2..9a29aff5 100644 --- a/tests/templates/kuttl/cluster-operation/30-restart-hbase.yaml.j2 +++ b/tests/templates/kuttl/cluster-operation/30-restart-hbase.yaml.j2 @@ -32,6 +32,7 @@ spec: gracefulShutdownTimeout: 1m logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + listenerClass: external-stable roleGroups: default: replicas: 1 @@ -40,6 +41,7 @@ spec: gracefulShutdownTimeout: 1m logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + listenerClass: external-stable roleGroups: default: replicas: 1 @@ -48,6 +50,7 @@ spec: gracefulShutdownTimeout: 1m logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + listenerClass: external-stable roleGroups: default: replicas: 1 diff --git a/tests/templates/kuttl/external-access/00-assert.yaml.j2 b/tests/templates/kuttl/external-access/00-assert.yaml.j2 new file mode 100644 index 00000000..50b1d4c3 --- /dev/null +++ b/tests/templates/kuttl/external-access/00-assert.yaml.j2 @@ -0,0 +1,10 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +{% if lookup('env', 'VECTOR_AGGREGATOR') %} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: vector-aggregator-discovery +{% endif %} diff --git a/tests/templates/kuttl/external-access/00-install-vector-aggregator-discovery-configmap.yaml.j2 b/tests/templates/kuttl/external-access/00-install-vector-aggregator-discovery-configmap.yaml.j2 new file mode 100644 index 00000000..2d6a0df5 --- /dev/null +++ b/tests/templates/kuttl/external-access/00-install-vector-aggregator-discovery-configmap.yaml.j2 @@ -0,0 +1,9 @@ +{% if lookup('env', 
'VECTOR_AGGREGATOR') %} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: vector-aggregator-discovery +data: + ADDRESS: {{ lookup('env', 'VECTOR_AGGREGATOR') }} +{% endif %} diff --git a/tests/templates/kuttl/external-access/00-patch-ns.yaml.j2 b/tests/templates/kuttl/external-access/00-patch-ns.yaml.j2 new file mode 100644 index 00000000..67185acf --- /dev/null +++ b/tests/templates/kuttl/external-access/00-patch-ns.yaml.j2 @@ -0,0 +1,9 @@ +{% if test_scenario['values']['openshift'] == 'true' %} +# see https://github.com/stackabletech/issues/issues/566 +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: kubectl patch namespace $NAMESPACE -p '{"metadata":{"labels":{"pod-security.kubernetes.io/enforce":"privileged"}}}' + timeout: 120 +{% endif %} diff --git a/tests/templates/kuttl/external-access/00-rbac.yaml.j2 b/tests/templates/kuttl/external-access/00-rbac.yaml.j2 new file mode 100644 index 00000000..7ee61d23 --- /dev/null +++ b/tests/templates/kuttl/external-access/00-rbac.yaml.j2 @@ -0,0 +1,29 @@ +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: test-role +rules: +{% if test_scenario['values']['openshift'] == "true" %} + - apiGroups: ["security.openshift.io"] + resources: ["securitycontextconstraints"] + resourceNames: ["privileged"] + verbs: ["use"] +{% endif %} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: test-sa +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: test-rb +subjects: + - kind: ServiceAccount + name: test-sa +roleRef: + kind: Role + name: test-role + apiGroup: rbac.authorization.k8s.io diff --git a/tests/templates/kuttl/external-access/01-assert.yaml b/tests/templates/kuttl/external-access/01-assert.yaml new file mode 100644 index 00000000..e0766c49 --- /dev/null +++ b/tests/templates/kuttl/external-access/01-assert.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 600 +--- +apiVersion: 
apps/v1 +kind: StatefulSet +metadata: + name: test-zk-server-default +status: + readyReplicas: 1 + replicas: 1 diff --git a/tests/templates/kuttl/external-access/01-install-zookeeper.yaml.j2 b/tests/templates/kuttl/external-access/01-install-zookeeper.yaml.j2 new file mode 100644 index 00000000..0a331d50 --- /dev/null +++ b/tests/templates/kuttl/external-access/01-install-zookeeper.yaml.j2 @@ -0,0 +1,29 @@ +--- +apiVersion: zookeeper.stackable.tech/v1alpha1 +kind: ZookeeperCluster +metadata: + name: test-zk +spec: + image: + productVersion: "{{ test_scenario['values']['zookeeper-latest'] }}" + pullPolicy: IfNotPresent +{% if lookup('env', 'VECTOR_AGGREGATOR') %} + clusterConfig: + vectorAggregatorConfigMapName: vector-aggregator-discovery +{% endif %} + servers: + config: + gracefulShutdownTimeout: 1m + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + roleGroups: + default: + replicas: 1 +--- +apiVersion: zookeeper.stackable.tech/v1alpha1 +kind: ZookeeperZnode +metadata: + name: test-znode +spec: + clusterRef: + name: test-zk diff --git a/tests/templates/kuttl/external-access/02-assert.yaml b/tests/templates/kuttl/external-access/02-assert.yaml new file mode 100644 index 00000000..99b25f8e --- /dev/null +++ b/tests/templates/kuttl/external-access/02-assert.yaml @@ -0,0 +1,28 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 600 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-hdfs-namenode-default +status: + readyReplicas: 2 + replicas: 2 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-hdfs-journalnode-default +status: + readyReplicas: 1 + replicas: 1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-hdfs-datanode-default +status: + readyReplicas: 1 + replicas: 1 diff --git a/tests/templates/kuttl/external-access/02-install-hdfs.yaml.j2 b/tests/templates/kuttl/external-access/02-install-hdfs.yaml.j2 new file mode 100644 index 00000000..f9194a60 --- 
/dev/null +++ b/tests/templates/kuttl/external-access/02-install-hdfs.yaml.j2 @@ -0,0 +1,39 @@ +--- +apiVersion: hdfs.stackable.tech/v1alpha1 +kind: HdfsCluster +metadata: + name: test-hdfs +spec: + image: + productVersion: "{{ test_scenario['values']['hdfs-latest'] }}" + pullPolicy: IfNotPresent + clusterConfig: + dfsReplication: 1 + zookeeperConfigMapName: test-znode +{% if lookup('env', 'VECTOR_AGGREGATOR') %} + vectorAggregatorConfigMapName: vector-aggregator-discovery +{% endif %} + nameNodes: + config: + gracefulShutdownTimeout: 1m + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + roleGroups: + default: + replicas: 2 + dataNodes: + config: + gracefulShutdownTimeout: 1m + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + roleGroups: + default: + replicas: 1 + journalNodes: + config: + gracefulShutdownTimeout: 1m + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + roleGroups: + default: + replicas: 1 diff --git a/tests/templates/kuttl/external-access/10-listener-classes.yaml b/tests/templates/kuttl/external-access/10-listener-classes.yaml new file mode 100644 index 00000000..893032c5 --- /dev/null +++ b/tests/templates/kuttl/external-access/10-listener-classes.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: | + envsubst < listener-classes.yaml | kubectl apply -n $NAMESPACE -f - diff --git a/tests/templates/kuttl/external-access/20-assert.yaml.j2 b/tests/templates/kuttl/external-access/20-assert.yaml.j2 new file mode 100644 index 00000000..223cc784 --- /dev/null +++ b/tests/templates/kuttl/external-access/20-assert.yaml.j2 @@ -0,0 +1,96 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +metadata: + name: test-available-condition +timeout: 600 +commands: + - script: kubectl -n $NAMESPACE wait --for=condition=available hbaseclusters.hbase.stackable.tech/test-hbase --timeout 301s +--- +apiVersion: 
kuttl.dev/v1beta1 +kind: TestAssert +metadata: + name: test-hbase +timeout: 1200 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-hbase-master-external-unstable +status: + readyReplicas: 2 + replicas: 2 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-hbase-regionserver-external-unstable +status: + readyReplicas: 1 + replicas: 1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-hbase-regionserver-external-stable +status: + readyReplicas: 1 + replicas: 1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-hbase-restserver-external-unstable +status: + readyReplicas: 1 + replicas: 1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-hbase-restserver-external-stable +status: + readyReplicas: 1 + replicas: 1 +--- +apiVersion: v1 +kind: Service +metadata: + name: listener-test-hbase-master-external-unstable-0 +spec: + type: NodePort +--- +apiVersion: v1 +kind: Service +metadata: + name: listener-test-hbase-master-external-unstable-1 +spec: + type: NodePort +--- +apiVersion: v1 +kind: Service +metadata: + name: listener-test-hbase-regionserver-external-stable-0 +spec: + type: NodePort +--- +apiVersion: v1 +kind: Service +metadata: + name: listener-test-hbase-regionserver-external-unstable-0 +spec: + type: NodePort +--- +apiVersion: v1 +kind: Service +metadata: + name: listener-test-hbase-restserver-external-stable-0 +spec: + type: NodePort +--- +apiVersion: v1 +kind: Service +metadata: + name: listener-test-hbase-restserver-external-unstable-0 +spec: + type: NodePort diff --git a/tests/templates/kuttl/external-access/20-install-hbase.yaml b/tests/templates/kuttl/external-access/20-install-hbase.yaml new file mode 100644 index 00000000..843b488d --- /dev/null +++ b/tests/templates/kuttl/external-access/20-install-hbase.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +timeout: 600 +commands: + - script: > + envsubst < install-hbase.yaml | + kubectl apply -n 
$NAMESPACE -f - diff --git a/tests/templates/kuttl/external-access/install-hbase.yaml.j2 b/tests/templates/kuttl/external-access/install-hbase.yaml.j2 new file mode 100644 index 00000000..690577b1 --- /dev/null +++ b/tests/templates/kuttl/external-access/install-hbase.yaml.j2 @@ -0,0 +1,55 @@ +--- +apiVersion: hbase.stackable.tech/v1alpha1 +kind: HbaseCluster +metadata: + name: test-hbase +spec: + image: +{% if test_scenario['values']['hbase'].find(",") > 0 %} + custom: "{{ test_scenario['values']['hbase'].split(',')[1] }}" + productVersion: "{{ test_scenario['values']['hbase'].split(',')[0] }}" +{% else %} + productVersion: "{{ test_scenario['values']['hbase'] }}" +{% endif %} + pullPolicy: IfNotPresent + clusterConfig: + hdfsConfigMapName: test-hdfs-namenode-default + zookeeperConfigMapName: test-znode +{% if lookup('env', 'VECTOR_AGGREGATOR') %} + vectorAggregatorConfigMapName: vector-aggregator-discovery +{% endif %} + masters: + config: + gracefulShutdownTimeout: 1m + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + listenerClass: test-external-unstable-$NAMESPACE + roleGroups: + external-unstable: + replicas: 2 + regionServers: + config: + gracefulShutdownTimeout: 1m + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + listenerClass: test-external-unstable-$NAMESPACE + roleGroups: + external-unstable: + replicas: 1 + external-stable: + replicas: 1 + config: + listenerClass: test-external-stable-$NAMESPACE + restServers: + config: + gracefulShutdownTimeout: 1m + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + listenerClass: test-external-stable-$NAMESPACE + roleGroups: + external-stable: + replicas: 1 + external-unstable: + replicas: 1 + config: + listenerClass: test-external-unstable-$NAMESPACE diff --git a/tests/templates/kuttl/external-access/listener-classes.yaml b/tests/templates/kuttl/external-access/listener-classes.yaml new file mode 100644 
index 00000000..4131526a --- /dev/null +++ b/tests/templates/kuttl/external-access/listener-classes.yaml @@ -0,0 +1,21 @@ +--- +apiVersion: listeners.stackable.tech/v1alpha1 +kind: ListenerClass +metadata: + name: test-cluster-internal-$NAMESPACE +spec: + serviceType: ClusterIP +--- +apiVersion: listeners.stackable.tech/v1alpha1 +kind: ListenerClass +metadata: + name: test-external-stable-$NAMESPACE +spec: + serviceType: NodePort +--- +apiVersion: listeners.stackable.tech/v1alpha1 +kind: ListenerClass +metadata: + name: test-external-unstable-$NAMESPACE +spec: + serviceType: NodePort diff --git a/tests/templates/kuttl/kerberos/30-install-hbase.yaml.j2 b/tests/templates/kuttl/kerberos/30-install-hbase.yaml.j2 index 28766a48..a21046d1 100644 --- a/tests/templates/kuttl/kerberos/30-install-hbase.yaml.j2 +++ b/tests/templates/kuttl/kerberos/30-install-hbase.yaml.j2 @@ -28,7 +28,6 @@ commands: clusterConfig: hdfsConfigMapName: hdfs zookeeperConfigMapName: hbase-znode - listenerClass: {{ test_scenario['values']['listener-class'] }} authentication: tlsSecretClass: tls kerberos: @@ -41,6 +40,7 @@ commands: gracefulShutdownTimeout: 1m logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + listenerClass: {{ test_scenario['values']['listener-class'] }} resources: memory: limit: 1536Mi @@ -52,6 +52,7 @@ commands: gracefulShutdownTimeout: 1m logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + listenerClass: {{ test_scenario['values']['listener-class'] }} roleGroups: default: replicas: 2 @@ -60,6 +61,7 @@ commands: gracefulShutdownTimeout: 1m logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + listenerClass: {{ test_scenario['values']['listener-class'] }} roleGroups: default: replicas: 1 diff --git a/tests/templates/kuttl/kerberos/42-test-rest-server.yaml b/tests/templates/kuttl/kerberos/42-test-rest-server.yaml index 5b29af5a..73515e9c 100644 --- 
a/tests/templates/kuttl/kerberos/42-test-rest-server.yaml +++ b/tests/templates/kuttl/kerberos/42-test-rest-server.yaml @@ -14,7 +14,7 @@ spec: - /bin/bash - -c - | - status_code=$(curl --write-out '%{http_code}' --silent --insecure --output /dev/null "https://hbase-restserver-default:8080") + status_code=$(curl --write-out '%{http_code}' --silent --insecure --output /dev/null "https://hbase-restserver-default-metrics:8080") if [[ "$status_code" -eq 401 ]] ; then echo "[PASS] Successfully got 401 as we did not authenticate" diff --git a/tests/templates/kuttl/opa/30-install-hbase.yaml.j2 b/tests/templates/kuttl/opa/30-install-hbase.yaml.j2 index 92fda28c..b2d01a8f 100644 --- a/tests/templates/kuttl/opa/30-install-hbase.yaml.j2 +++ b/tests/templates/kuttl/opa/30-install-hbase.yaml.j2 @@ -51,7 +51,6 @@ commands: clusterConfig: hdfsConfigMapName: hdfs zookeeperConfigMapName: hbase-znode - listenerClass: 'cluster-internal' authentication: tlsSecretClass: tls kerberos: diff --git a/tests/templates/kuttl/profiling/run-profiler.py b/tests/templates/kuttl/profiling/run-profiler.py index 7afe0d71..d5edc7d5 100644 --- a/tests/templates/kuttl/profiling/run-profiler.py +++ b/tests/templates/kuttl/profiling/run-profiler.py @@ -8,8 +8,7 @@ def start_profiling_and_get_refresh_header(service_url): prof_page = requests.get( - f"{service_url}/prof" - f"?event={EVENT_TYPE}&duration={PROFILING_DURATION_IN_SEC}" + f"{service_url}/prof?event={EVENT_TYPE}&duration={PROFILING_DURATION_IN_SEC}" ) assert prof_page.ok, f"""Profiling could not be started. 
@@ -56,9 +55,7 @@ def fetch_flamegraph(service_url, refresh_path): def test_profiling(role, port): - service_url = ( - f"http://test-hbase-{role}-default-0.test-hbase-{role}-default" f":{port}" - ) + service_url = f"http://test-hbase-{role}-default-metrics:{port}" print(f"Test profiling on {service_url}") diff --git a/tests/templates/kuttl/shutdown/30-install-hbase.yaml.j2 b/tests/templates/kuttl/shutdown/30-install-hbase.yaml.j2 index 4ef58b80..e05aa96b 100644 --- a/tests/templates/kuttl/shutdown/30-install-hbase.yaml.j2 +++ b/tests/templates/kuttl/shutdown/30-install-hbase.yaml.j2 @@ -15,7 +15,6 @@ spec: clusterConfig: hdfsConfigMapName: test-hdfs zookeeperConfigMapName: test-znode - listenerClass: "cluster-internal" {% if lookup('env', 'VECTOR_AGGREGATOR') %} vectorAggregatorConfigMapName: vector-aggregator-discovery {% endif %} diff --git a/tests/templates/kuttl/smoke/30-install-hbase.yaml.j2 b/tests/templates/kuttl/smoke/30-install-hbase.yaml.j2 index 53e9a98e..7535a3e8 100644 --- a/tests/templates/kuttl/smoke/30-install-hbase.yaml.j2 +++ b/tests/templates/kuttl/smoke/30-install-hbase.yaml.j2 @@ -15,7 +15,6 @@ spec: clusterConfig: hdfsConfigMapName: test-hdfs zookeeperConfigMapName: test-znode - listenerClass: {{ test_scenario['values']['listener-class'] }} {% if lookup('env', 'VECTOR_AGGREGATOR') %} vectorAggregatorConfigMapName: vector-aggregator-discovery {% endif %} @@ -23,6 +22,7 @@ spec: config: logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + listenerClass: {{ test_scenario['values']['listener-class'] }} roleGroups: default: configOverrides: @@ -34,6 +34,7 @@ spec: config: logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + listenerClass: {{ test_scenario['values']['listener-class'] }} roleGroups: default: configOverrides: @@ -45,6 +46,7 @@ spec: config: logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + listenerClass: {{ 
test_scenario['values']['listener-class'] }} resources: memory: limit: 1Gi diff --git a/tests/templates/kuttl/smoke/50-assert.yaml b/tests/templates/kuttl/smoke/50-assert.yaml index b7f37813..e907c023 100644 --- a/tests/templates/kuttl/smoke/50-assert.yaml +++ b/tests/templates/kuttl/smoke/50-assert.yaml @@ -4,5 +4,5 @@ kind: TestAssert metadata: name: test-hbase commands: - - script: kubectl exec --namespace=$NAMESPACE hbase-test-runner-0 -- python /tmp/test-hbase.py http://test-hbase-restserver-default:8080 + - script: kubectl exec --namespace=$NAMESPACE hbase-test-runner-0 -- python /tmp/test-hbase.py http://test-hbase-restserver-default-metrics:8080 timeout: 240 diff --git a/tests/test-definition.yaml b/tests/test-definition.yaml index 240a53f8..e4ec1e40 100644 --- a/tests/test-definition.yaml +++ b/tests/test-definition.yaml @@ -137,6 +137,12 @@ tests: - hdfs-latest - zookeeper-latest - openshift + - name: external-access + dimensions: + - hbase + - hdfs-latest + - zookeeper-latest + - openshift suites: - name: nightly patch: