Skip to content

Commit 287436a

Browse files
committed
fix: e2e changes to remove v1.23
This changes the EKS upgrade test to use a newer version of Kubernetes. It was using v1.23, which isn't supported. Also, removed old CSI tests, as they were testing upgrades to v1.23 for unmanaged clusters. Signed-off-by: Richard Case <richard.case@outlook.com>
1 parent 85759ce commit 287436a

File tree

5 files changed

+16
-231
lines changed

5 files changed

+16
-231
lines changed

test/e2e/data/e2e_conf.yaml

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -180,9 +180,6 @@ variables:
180180
KUBERNETES_VERSION: "v1.29.9"
181181
KUBERNETES_VERSION_UPGRADE_TO: "v1.29.9"
182182
KUBERNETES_VERSION_UPGRADE_FROM: "v1.29.8"
183-
# Pre and post 1.23 Kubernetes versions are being used for CSI upgrade tests
184-
PRE_1_23_KUBERNETES_VERSION: "v1.22.17"
185-
POST_1_23_KUBERNETES_VERSION: "v1.23.15"
186183
CNI: "../../data/cni/calico.yaml"
187184
KUBETEST_CONFIGURATION: "../../data/kubetest/conformance.yaml"
188185
EVENT_BRIDGE_INSTANCE_STATE: "true"

test/e2e/data/e2e_eks_conf.yaml

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@ images:
1616
- name: gcr.io/k8s-staging-cluster-api/capa-manager:e2e
1717
loadBehavior: mustLoad
1818

19-
## PLEASE KEEP THESE UP TO DATE WITH THE COMPONENTS
19+
## PLEASE KEEP THESE UP TO DATE WITH THE COMPONENTS
2020
- name: quay.io/jetstack/cert-manager-cainjector:v1.15.1
2121
loadBehavior: tryLoad
2222
- name: quay.io/jetstack/cert-manager-webhook:v1.15.1
@@ -118,6 +118,8 @@ providers:
118118
variables:
119119
KUBERNETES_VERSION: "v1.30.2"
120120
KUBERNETES_VERSION_MANAGEMENT: "v1.30.0" # Kind bootstrap
121+
UPGRADE_FROM_VERSION: "v1.30.0"
122+
UPGRADE_TO_VERSION: "v1.31.0"
121123
EXP_MACHINE_POOL: "true"
122124
EXP_CLUSTER_RESOURCE_SET: "true"
123125
EVENT_BRIDGE_INSTANCE_STATE: "true"

test/e2e/shared/defaults.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -66,11 +66,11 @@ const (
6666
StorageClassOutTreeZoneLabel = "topology.ebs.csi.aws.com/zone"
6767
GPUFlavor = "gpu"
6868
InstanceVcpu = "AWS_MACHINE_TYPE_VCPU_USAGE"
69-
PreCSIKubernetesVer = "PRE_1_23_KUBERNETES_VERSION"
70-
PostCSIKubernetesVer = "POST_1_23_KUBERNETES_VERSION"
7169
EFSSupport = "efs-support"
7270
IntreeCloudProvider = "intree-cloud-provider"
7371
MultiTenancy = "MULTI_TENANCY_"
72+
EksUpgradeFromVersion = "UPGRADE_FROM_VERSION"
73+
EksUpgradeToVersion = "UPGRADE_TO_VERSION"
7474
)
7575

7676
// ResourceQuotaFilePath is the path to the file that contains the resource usage.

test/e2e/suites/managed/upgrade_test.go

Lines changed: 11 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -35,27 +35,29 @@ import (
3535

3636
// EKS cluster upgrade tests.
3737
var _ = ginkgo.Describe("EKS Cluster upgrade test", func() {
38-
const (
39-
initialVersion = "v1.23.6"
40-
upgradeToVersion = "v1.24.4"
41-
)
4238
var (
43-
namespace *corev1.Namespace
44-
ctx context.Context
45-
specName = "eks-upgrade"
46-
clusterName string
39+
namespace *corev1.Namespace
40+
ctx context.Context
41+
specName = "eks-upgrade"
42+
clusterName string
43+
initialVersion string
44+
upgradeToVersion string
4745
)
4846

4947
shared.ConditionalIt(runUpgradeTests, "[managed] [upgrade] should create a cluster and upgrade the kubernetes version", func() {
5048
ginkgo.By("should have a valid test configuration")
5149
Expect(e2eCtx.Environment.BootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. BootstrapClusterProxy can't be nil")
5250
Expect(e2eCtx.E2EConfig).ToNot(BeNil(), "Invalid argument. e2eConfig can't be nil when calling %s spec", specName)
53-
Expect(e2eCtx.E2EConfig.Variables).To(HaveKey(shared.KubernetesVersion))
51+
Expect(e2eCtx.E2EConfig.Variables).To(HaveKey(shared.EksUpgradeFromVersion))
52+
Expect(e2eCtx.E2EConfig.Variables).To(HaveKey(shared.EksUpgradeToVersion))
5453

5554
ctx = context.TODO()
5655
namespace = shared.SetupSpecNamespace(ctx, specName, e2eCtx)
5756
clusterName = fmt.Sprintf("%s-%s", specName, util.RandomString(6))
5857

58+
initialVersion = e2eCtx.E2EConfig.GetVariable(shared.EksUpgradeFromVersion)
59+
upgradeToVersion = e2eCtx.E2EConfig.GetVariable(shared.EksUpgradeToVersion)
60+
5961
ginkgo.By("default iam role should exist")
6062
VerifyRoleExistsAndOwned(ekscontrolplanev1.DefaultEKSControlPlaneRole, clusterName, false, e2eCtx.BootstrapUserAWSSession)
6163

test/e2e/suites/unmanaged/unmanaged_functional_test.go

Lines changed: 0 additions & 216 deletions
Original file line numberDiff line numberDiff line change
@@ -305,222 +305,6 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() {
305305
})
306306
})
307307

308-
// todo: Fix and enable back the tests ASAP.
309-
ginkgo.PDescribe("CSI=in-tree CCM=in-tree AWSCSIMigration=off: upgrade to v1.23", func() {
310-
ginkgo.It("should create volumes dynamically with in tree CSI driver and in tree cloud provider", func() {
311-
specName := "csimigration-off-upgrade"
312-
if !e2eCtx.Settings.SkipQuotas {
313-
requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 1, VolumeGP2: 4, EventBridgeRules: 50}
314-
requiredResources.WriteRequestedResources(e2eCtx, specName)
315-
Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
316-
defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))
317-
}
318-
namespace := shared.SetupNamespace(ctx, specName, e2eCtx)
319-
defer shared.DumpSpecResourcesAndCleanup(ctx, "", namespace, e2eCtx)
320-
321-
ginkgo.By("Creating first cluster with single control plane")
322-
cluster1Name := fmt.Sprintf("%s-%s", specName, util.RandomString(6))
323-
configCluster := defaultConfigCluster(cluster1Name, namespace.Name)
324-
configCluster.KubernetesVersion = e2eCtx.E2EConfig.GetVariable(shared.PreCSIKubernetesVer)
325-
configCluster.WorkerMachineCount = ptr.To[int64](1)
326-
configCluster.Flavor = shared.IntreeCloudProvider
327-
createCluster(ctx, configCluster, result)
328-
329-
// Create statefulSet with PVC and confirm it is working with in-tree providers
330-
nginxStatefulsetInfo := createStatefulSetInfo(true, "intree")
331-
332-
ginkgo.By("Deploying StatefulSet on infra")
333-
clusterClient := e2eCtx.Environment.BootstrapClusterProxy.GetWorkloadCluster(ctx, namespace.Name, cluster1Name).GetClient()
334-
335-
createStatefulSet(nginxStatefulsetInfo, clusterClient)
336-
awsVolIDs := getVolumeIDs(nginxStatefulsetInfo, clusterClient)
337-
verifyVolumesExists(awsVolIDs)
338-
339-
kubernetesUgradeVersion := e2eCtx.E2EConfig.GetVariable(shared.PostCSIKubernetesVer)
340-
configCluster.KubernetesVersion = kubernetesUgradeVersion
341-
configCluster.Flavor = "csimigration-off"
342-
343-
cluster2, _, kcp := createCluster(ctx, configCluster, result)
344-
345-
ginkgo.By("Waiting for control-plane machines to have the upgraded kubernetes version")
346-
framework.WaitForControlPlaneMachinesToBeUpgraded(ctx, framework.WaitForControlPlaneMachinesToBeUpgradedInput{
347-
Lister: e2eCtx.Environment.BootstrapClusterProxy.GetClient(),
348-
Cluster: cluster2,
349-
MachineCount: int(*kcp.Spec.Replicas),
350-
KubernetesUpgradeVersion: kubernetesUgradeVersion,
351-
}, e2eCtx.E2EConfig.GetIntervals(specName, "wait-contolplane-upgrade")...)
352-
353-
ginkgo.By("Creating the LB service")
354-
lbServiceName := TestSvc + util.RandomString(6)
355-
elbName := createLBService(metav1.NamespaceDefault, lbServiceName, clusterClient)
356-
verifyElbExists(elbName, true)
357-
358-
ginkgo.By("Checking v1.22 StatefulSet still healthy after the upgrade")
359-
waitForStatefulSetRunning(nginxStatefulsetInfo, clusterClient)
360-
361-
nginxStatefulsetInfo2 := createStatefulSetInfo(true, "postupgrade")
362-
363-
ginkgo.By("Deploying StatefulSet on infra when K8s >= 1.23")
364-
createStatefulSet(nginxStatefulsetInfo2, clusterClient)
365-
awsVolIDs = getVolumeIDs(nginxStatefulsetInfo2, clusterClient)
366-
verifyVolumesExists(awsVolIDs)
367-
368-
ginkgo.By("Deleting LB service")
369-
deleteLBService(metav1.NamespaceDefault, lbServiceName, clusterClient)
370-
371-
ginkgo.By("Deleting the Clusters")
372-
deleteCluster(ctx, cluster2)
373-
374-
ginkgo.By("Deleting retained dynamically provisioned volumes")
375-
deleteRetainedVolumes(awsVolIDs)
376-
ginkgo.By("PASSED!")
377-
})
378-
})
379-
380-
ginkgo.PDescribe("CSI=external CCM=in-tree AWSCSIMigration=on: upgrade to v1.23", func() {
381-
ginkgo.It("should create volumes dynamically with external CSI driver and in tree cloud provider", func() {
382-
specName := "only-csi-external-upgrade"
383-
if !e2eCtx.Settings.SkipQuotas {
384-
requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 1, VolumeGP2: 4, EventBridgeRules: 50}
385-
requiredResources.WriteRequestedResources(e2eCtx, specName)
386-
Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
387-
defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))
388-
}
389-
namespace := shared.SetupNamespace(ctx, specName, e2eCtx)
390-
defer shared.DumpSpecResourcesAndCleanup(ctx, "", namespace, e2eCtx)
391-
ginkgo.By("Creating first cluster with single control plane")
392-
cluster1Name := fmt.Sprintf("%s-%s", specName, util.RandomString(6))
393-
394-
configCluster := defaultConfigCluster(cluster1Name, namespace.Name)
395-
configCluster.KubernetesVersion = e2eCtx.E2EConfig.GetVariable(shared.PreCSIKubernetesVer)
396-
configCluster.WorkerMachineCount = ptr.To[int64](1)
397-
configCluster.Flavor = shared.IntreeCloudProvider
398-
createCluster(ctx, configCluster, result)
399-
400-
// Create statefulSet with PVC and confirm it is working with in-tree providers
401-
nginxStatefulsetInfo := createStatefulSetInfo(true, "intree")
402-
403-
ginkgo.By("Deploying StatefulSet on infra")
404-
clusterClient := e2eCtx.Environment.BootstrapClusterProxy.GetWorkloadCluster(ctx, namespace.Name, cluster1Name).GetClient()
405-
406-
createStatefulSet(nginxStatefulsetInfo, clusterClient)
407-
awsVolIDs := getVolumeIDs(nginxStatefulsetInfo, clusterClient)
408-
verifyVolumesExists(awsVolIDs)
409-
410-
kubernetesUgradeVersion := e2eCtx.E2EConfig.GetVariable(shared.PostCSIKubernetesVer)
411-
412-
configCluster.KubernetesVersion = kubernetesUgradeVersion
413-
configCluster.Flavor = "external-csi"
414-
415-
cluster2, _, kcp := createCluster(ctx, configCluster, result)
416-
417-
ginkgo.By("Waiting for control-plane machines to have the upgraded kubernetes version")
418-
framework.WaitForControlPlaneMachinesToBeUpgraded(ctx, framework.WaitForControlPlaneMachinesToBeUpgradedInput{
419-
Lister: e2eCtx.Environment.BootstrapClusterProxy.GetClient(),
420-
Cluster: cluster2,
421-
MachineCount: int(*kcp.Spec.Replicas),
422-
KubernetesUpgradeVersion: kubernetesUgradeVersion,
423-
}, e2eCtx.E2EConfig.GetIntervals(specName, "wait-contolplane-upgrade")...)
424-
425-
ginkgo.By("Creating the LB service")
426-
lbServiceName := TestSvc + util.RandomString(6)
427-
elbName := createLBService(metav1.NamespaceDefault, lbServiceName, clusterClient)
428-
verifyElbExists(elbName, true)
429-
430-
ginkgo.By("Checking v1.22 StatefulSet still healthy after the upgrade")
431-
waitForStatefulSetRunning(nginxStatefulsetInfo, clusterClient)
432-
433-
nginxStatefulsetInfo2 := createStatefulSetInfo(false, "postupgrade")
434-
435-
ginkgo.By("Deploying StatefulSet on infra when K8s >= 1.23")
436-
createStatefulSet(nginxStatefulsetInfo2, clusterClient)
437-
awsVolIDs = getVolumeIDs(nginxStatefulsetInfo2, clusterClient)
438-
verifyVolumesExists(awsVolIDs)
439-
440-
ginkgo.By("Deleting LB service")
441-
deleteLBService(metav1.NamespaceDefault, lbServiceName, clusterClient)
442-
443-
ginkgo.By("Deleting the Clusters")
444-
deleteCluster(ctx, cluster2)
445-
446-
ginkgo.By("Deleting retained dynamically provisioned volumes")
447-
deleteRetainedVolumes(awsVolIDs)
448-
ginkgo.By("PASSED!")
449-
})
450-
})
451-
452-
ginkgo.PDescribe("CSI=external CCM=external AWSCSIMigration=on: upgrade to v1.23", func() {
453-
ginkgo.It("should create volumes dynamically with external CSI driver and external cloud provider", func() {
454-
specName := "csi-ccm-external-upgrade"
455-
if !e2eCtx.Settings.SkipQuotas {
456-
requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 1, VolumeGP2: 4, EventBridgeRules: 50}
457-
requiredResources.WriteRequestedResources(e2eCtx, specName)
458-
Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
459-
defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))
460-
}
461-
namespace := shared.SetupNamespace(ctx, specName, e2eCtx)
462-
defer shared.DumpSpecResourcesAndCleanup(ctx, "", namespace, e2eCtx)
463-
464-
ginkgo.By("Creating first cluster with single control plane")
465-
cluster1Name := fmt.Sprintf("%s-%s", specName, util.RandomString(6))
466-
configCluster := defaultConfigCluster(cluster1Name, namespace.Name)
467-
configCluster.KubernetesVersion = e2eCtx.E2EConfig.GetVariable(shared.PreCSIKubernetesVer)
468-
469-
configCluster.WorkerMachineCount = ptr.To[int64](1)
470-
configCluster.Flavor = shared.IntreeCloudProvider
471-
createCluster(ctx, configCluster, result)
472-
473-
// Create statefulSet with PVC and confirm it is working with in-tree providers
474-
nginxStatefulsetInfo := createStatefulSetInfo(true, "intree")
475-
476-
ginkgo.By("Deploying StatefulSet on infra")
477-
clusterClient := e2eCtx.Environment.BootstrapClusterProxy.GetWorkloadCluster(ctx, namespace.Name, cluster1Name).GetClient()
478-
479-
createStatefulSet(nginxStatefulsetInfo, clusterClient)
480-
awsVolIDs := getVolumeIDs(nginxStatefulsetInfo, clusterClient)
481-
verifyVolumesExists(awsVolIDs)
482-
483-
kubernetesUgradeVersion := e2eCtx.E2EConfig.GetVariable(shared.PostCSIKubernetesVer)
484-
configCluster.KubernetesVersion = kubernetesUgradeVersion
485-
configCluster.Flavor = "upgrade-to-external-cloud-provider"
486-
487-
cluster2, _, kcp := createCluster(ctx, configCluster, result)
488-
489-
ginkgo.By("Waiting for control-plane machines to have the upgraded kubernetes version")
490-
framework.WaitForControlPlaneMachinesToBeUpgraded(ctx, framework.WaitForControlPlaneMachinesToBeUpgradedInput{
491-
Lister: e2eCtx.Environment.BootstrapClusterProxy.GetClient(),
492-
Cluster: cluster2,
493-
MachineCount: int(*kcp.Spec.Replicas),
494-
KubernetesUpgradeVersion: kubernetesUgradeVersion,
495-
}, e2eCtx.E2EConfig.GetIntervals(specName, "wait-contolplane-upgrade")...)
496-
497-
ginkgo.By("Creating the LB service")
498-
lbServiceName := TestSvc + util.RandomString(6)
499-
elbName := createLBService(metav1.NamespaceDefault, lbServiceName, clusterClient)
500-
verifyElbExists(elbName, true)
501-
502-
ginkgo.By("Checking v1.22 StatefulSet still healthy after the upgrade")
503-
waitForStatefulSetRunning(nginxStatefulsetInfo, clusterClient)
504-
505-
nginxStatefulsetInfo2 := createStatefulSetInfo(false, "postupgrade")
506-
507-
ginkgo.By("Deploying StatefulSet on infra when K8s >= 1.23")
508-
createStatefulSet(nginxStatefulsetInfo2, clusterClient)
509-
awsVolIDs = getVolumeIDs(nginxStatefulsetInfo2, clusterClient)
510-
verifyVolumesExists(awsVolIDs)
511-
512-
ginkgo.By("Deleting LB service")
513-
deleteLBService(metav1.NamespaceDefault, lbServiceName, clusterClient)
514-
515-
ginkgo.By("Deleting the Clusters")
516-
deleteCluster(ctx, cluster2)
517-
518-
ginkgo.By("Deleting retained dynamically provisioned volumes")
519-
deleteRetainedVolumes(awsVolIDs)
520-
ginkgo.By("PASSED!")
521-
})
522-
})
523-
524308
ginkgo.Describe("Workload cluster with AWS SSM Parameter as the Secret Backend", func() {
525309
ginkgo.It("should be creatable and deletable", func() {
526310
specName := "functional-test-ssm-parameter-store"

0 commit comments

Comments
 (0)