Skip to content
This repository has been archived by the owner on Feb 12, 2024. It is now read-only.

e2e tests #366

Draft
wants to merge 11 commits into
base: main
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
The table of contents is too big for display.
Diff view
Diff view
  •  
  •  
  •  
The diff you're trying to view is too large. We only load the first 3000 changed files.
7 changes: 0 additions & 7 deletions .ci/pipeline_definitions
Original file line number Diff line number Diff line change
Expand Up @@ -50,13 +50,6 @@ gardener-extension-provider-vsphere:
scheduling:
suppress_parallel_execution: true
<<: *publish_anchor
create-upgrade-prs:
traits:
component_descriptor: ~
version: ~
cronjob:
interval: '5m'
update_component_deps: ~
scan_artifacts:
traits:
component_descriptor: ~
Expand Down
30 changes: 17 additions & 13 deletions .ci/test-integration
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,15 @@ set -o errexit
set -o nounset
set -o pipefail
set -x
# Tear down all cloud resources on any exit (success or failure).
# The trap is registered before setup so that a mid-script failure still
# triggers teardown. Because `set -o nounset` is active and the trap can
# fire before workspace/zone are exported, every expansion uses a ${var:-}
# default instead of crashing inside the handler.
trap cleanup EXIT

function cleanup() {
  # De-register the EXIT trap so an explicit `cleanup` call at the end of
  # the script does not run the (destructive) teardown a second time.
  trap - EXIT
  # Nothing to tear down if setup never got far enough to name a workspace.
  if [ -z "${workspace:-}" ]; then
    return 0
  fi
  # Guard the cd: running `terraform destroy` from the wrong directory
  # would silently operate on the wrong state.
  cd "${CC_ROOT_DIR:-}/${SOURCE_PATH:-}/.ci/terraform" || return
  terraform destroy -input=false -auto-approve
  terraform workspace select default
  terraform workspace delete "${workspace}"
  # Shut down the GCVE private cloud that hosted this test run.
  /gcve-setup --phase=shutdown --cloud-id="${workspace}" --location="${zone:-}" --service-account="${GOOGLE_APPLICATION_CREDENTIALS:-}" --cred-file="${TF_VAR_privatecloud_cred:-}"
}

cd "$(dirname $0)/.."

Expand All @@ -26,7 +35,7 @@ export TF_VAR_tm_repo_path="${CC_ROOT_DIR}/${TM_REPO_PATH}"
export zone="us-west2-a"

/gcve-setup --phase=setup --allow-scavenging=true --location=$zone --cidr=10.120.0.0/20 --service-account=$GOOGLE_APPLICATION_CREDENTIALS --cred-file=$TF_VAR_privatecloud_cred
export workspace=$(yq '.privateCloud.name' < $TF_VAR_privatecloud_cred | sed 's:.*/::')
export workspace=$(yq '.privateCloud.name' <$TF_VAR_privatecloud_cred | sed 's:.*/::')

cd ${CC_ROOT_DIR}/${SOURCE_PATH}/.ci/terraform
terraform init -input=false || exit
Expand All @@ -40,18 +49,13 @@ terraform apply -input=false -auto-approve

cd ${CC_ROOT_DIR}/${SOURCE_PATH}
/testrunner run \
--tm-kubeconfig-path=/tmp/kubeconfig \
--testrun-prefix tm-extension-vsphere- \
--no-execution-group \
--timeout=1800 \
--testruns-chart-path=.ci/testruns/default \
--set revision="$(git rev-parse HEAD)"
--tm-kubeconfig-path=/tmp/kubeconfig \
--testrun-prefix tm-extension-vsphere- \
--no-execution-group \
--timeout=1800 \
--testruns-chart-path=.ci/testruns/default \
--set revision="$(git rev-parse HEAD)"

kubectl --kubeconfig=/tmp/kubeconfig delete testrun --all

cd ${CC_ROOT_DIR}/${SOURCE_PATH}/.ci/terraform
terraform destroy -input=false -auto-approve
terraform workspace select default
terraform workspace delete $workspace

/gcve-setup --phase=shutdown --cloud-id=$workspace --location=$zone --service-account=$GOOGLE_APPLICATION_CREDENTIALS --cred-file=$TF_VAR_privatecloud_cred
cleanup
1 change: 1 addition & 0 deletions .test-defs/infrastructure-test.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -19,4 +19,5 @@ spec:
--nsxt-t0-gateway="${NSXT_T0_GATEWAY}"
--nsxt-edge-cluster="${NSXT_EDGE_CLUSTER}"
--nsxt-snat-ip-pool="${NSXT_SNAT_IP_POOL}"
--ginkgo.focus="Infrastructure tests"
image: golang:1.19.5
4 changes: 2 additions & 2 deletions charts/images.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -110,7 +110,7 @@ images:
- name: machine-controller-manager
sourceRepository: github.com/gardener/machine-controller-manager
repository: eu.gcr.io/gardener-project/gardener/machine-controller-manager
tag: "v0.48.0"
tag: "v0.49.1"
labels:
- name: 'gardener.cloud/cve-categorisation'
value:
Expand All @@ -123,7 +123,7 @@ images:
- name: machine-controller-manager-provider-vsphere
sourceRepository: github.com/gardener/machine-controller-manager-provider-vsphere
repository: eu.gcr.io/gardener-project/gardener/machine-controller-manager-provider-vsphere
tag: "v0.20.0"
tag: "v0.22.0"
labels:
- name: 'gardener.cloud/cve-categorisation'
value:
Expand Down
131 changes: 131 additions & 0 deletions test/integration/healthcheck/test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,131 @@
/*
* Copyright (c) 2020-2023 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
*
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

/**
Overview
- Tests the health checks of the extension: provider-vsphere.
- Manipulates health check relevant resources and expects the extension-provider to properly report the results as conditions in the respective CRD (ControlPlane(Type Normal & Exposure) & Worker CRD).

Prerequisites
- A Shoot exists.

Test-cases:
1) ControlPlane
1.1) HealthCondition Type: Shoot ControlPlaneHealthy
- delete the deployment 'cloud-controller-manager' and verify health check conditions in the ControlPlane status.
1.2) HealthCondition Type: Shoot SystemComponentsHealthy
- update the ManagedResource 'extension-controlplane-shoot' with an unhealthy condition and verify health check conditions in the ControlPlane status.
//2) ControlPlane (Type: Exposure)
// 2.1) HealthCondition Type: Shoot ControlPlaneHealthy
// - delete the deployment 'aws-lb-readvertiser' and verify health check conditions in the ControlPlane status.
3) Worker
3.1) HealthCondition Type: Shoot ControlPlaneHealthy
- delete the deployment 'machine-controller-manager' and verify health check conditions in the Worker status.
3.2) HealthCondition Type: Shoot SystemComponentsHealthy
- update the ManagedResource 'extension-worker-mcm-shoot' with an unhealthy condition and verify health check conditions in the Worker status.
3.3) HealthCondition Type: Shoot EveryNodeReady
- delete a machine of the shoot cluster and verify the health check conditions in the Worker status report a missing node.
**/

package healthcheck

import (
"context"
"fmt"
"time"

"github.com/gardener/gardener-extension-provider-vsphere/pkg/vsphere"

genericcontrolplaneactuator "github.com/gardener/gardener/extensions/pkg/controller/controlplane/genericactuator"
genericworkeractuator "github.com/gardener/gardener/extensions/pkg/controller/worker/genericactuator"
gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
"github.com/gardener/gardener/pkg/client/kubernetes"
"github.com/gardener/gardener/test/framework"
healthcheckoperation "github.com/gardener/gardener/test/testmachinery/extensions/healthcheck"
machinev1alpha1 "github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1"
"github.com/onsi/ginkgo/v2"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)

// Timeouts for the health check test cases below.
const (
timeout = 10 * time.Minute // general per-case timeout for condition checks
nodeRecreationTimeout = 20 * time.Minute // machine deletion needs time for the node to be recreated
setupContextTimeout = 2 * time.Minute // budget for preparing a test case (e.g. patching a ManagedResource)
)

// Integration test for the provider-vsphere health checks: each case breaks a
// health-check-relevant resource in the seed and expects the extension
// controller to report the matching unhealthy condition on the ControlPlane /
// Worker CRD status.
//
// Fix: the Describe text said "AWS integration test" — a copy-paste leftover
// from the AWS extension; this file tests provider-vsphere.
var _ = ginkgo.Describe("vsphere integration test: health checks", func() {
	f := createShootFramework()

	ginkgo.Context("ControlPlane", func() {
		ginkgo.Context("Condition type: ShootControlPlaneHealthy", func() {
			// Deleting the cloud-controller-manager deployment in the seed must surface
			// as an unhealthy ShootControlPlaneHealthy condition on the ControlPlane CRD.
			f.Serial().Release().CIt(fmt.Sprintf("ControlPlane CRD should contain unhealthy condition because the deployment '%s' cannot be found in the shoot namespace in the seed", vsphere.CloudControllerManagerName), func(ctx context.Context) {
				err := healthcheckoperation.ControlPlaneHealthCheckDeleteSeedDeployment(ctx, f, f.Shoot.GetName(), vsphere.CloudControllerManagerName, gardencorev1beta1.ShootControlPlaneHealthy)
				framework.ExpectNoError(err)
			}, timeout)
		})

		ginkgo.Context("Condition type: ShootSystemComponentsHealthy", func() {
			// Marking the controlplane shoot ManagedResource unhealthy must surface as an
			// unhealthy ShootSystemComponentsHealthy condition on the ControlPlane CRD.
			f.Serial().Release().CIt(fmt.Sprintf("ControlPlane CRD should contain unhealthy condition due to ManagedResource ('%s') unhealthy", genericcontrolplaneactuator.ControlPlaneShootChartResourceName), func(ctx context.Context) {
				err := healthcheckoperation.ControlPlaneHealthCheckWithManagedResource(ctx, setupContextTimeout, f, genericcontrolplaneactuator.ControlPlaneShootChartResourceName, gardencorev1beta1.ShootSystemComponentsHealthy)
				framework.ExpectNoError(err)
			}, timeout)
		})
	})

	//ginkgo.Context("ControlPlane-exposure", func() {
	//	ginkgo.Context("Condition type: ShootControlPlaneHealthy", func() {
	//		f.Serial().Release().CIt(fmt.Sprintf("ControlPlane CRD should contain unhealthy condition because the deployment '%s' cannot be found in the shoot namespace in the seed", vsphere.LBReadvertiserDeploymentName), func(ctx context.Context) {
	//			err := healthcheckoperation.ControlPlaneHealthCheckDeleteSeedDeployment(ctx, f, fmt.Sprintf("%s-exposure", f.Shoot.GetName()), vsphere.LBReadvertiserDeploymentName, gardencorev1beta1.ShootControlPlaneHealthy)
	//			framework.ExpectNoError(err)
	//		}, timeout)
	//	})
	//})

	ginkgo.Context("Worker", func() {
		ginkgo.Context("Condition type: ShootControlPlaneHealthy", func() {
			// Deleting the machine-controller-manager deployment in the seed must surface
			// as an unhealthy ShootControlPlaneHealthy condition on the Worker CRD.
			f.Serial().Release().CIt(fmt.Sprintf("Worker CRD should contain unhealthy condition because the deployment '%s' cannot be found in the shoot namespace in the seed", vsphere.MachineControllerManagerName), func(ctx context.Context) {
				err := healthcheckoperation.WorkerHealthCheckDeleteSeedDeployment(ctx, f, f.Shoot.GetName(), vsphere.MachineControllerManagerName, gardencorev1beta1.ShootControlPlaneHealthy)
				framework.ExpectNoError(err)
			}, timeout)
		})

		ginkgo.Context("Condition type: ShootSystemComponentsHealthy", func() {
			// Marking the worker MCM shoot ManagedResource unhealthy must surface as an
			// unhealthy ShootSystemComponentsHealthy condition on the Worker CRD.
			f.Serial().Release().CIt(fmt.Sprintf("Worker CRD should contain unhealthy condition due to ManagedResource ('%s') unhealthy", genericworkeractuator.McmShootResourceName), func(ctx context.Context) {
				err := healthcheckoperation.WorkerHealthCheckWithManagedResource(ctx, setupContextTimeout, f, genericworkeractuator.McmShootResourceName, gardencorev1beta1.ShootSystemComponentsHealthy)
				framework.ExpectNoError(err)
			}, timeout)
		})

		ginkgo.Context("Condition type: ShootEveryNodeReady", func() {
			// Deleting a machine must make the health check report a missing node; the
			// longer timeout allows the node to be recreated afterwards.
			f.Serial().Release().CIt("Worker CRD should contain unhealthy condition because not enough machines are available", func(ctx context.Context) {
				err := healthcheckoperation.MachineDeletionHealthCheck(ctx, f)
				framework.ExpectNoError(err)
			}, nodeRecreationTimeout)
		})
	})
})

// createShootFramework builds a ShootFramework whose seed scheme can decode
// machine-controller-manager resources (Machine, MachineDeployment, ...),
// which the health check operations above manipulate on the seed cluster.
func createShootFramework() *framework.ShootFramework {
	extensionSeedScheme := kubernetes.SeedScheme
	seedSchemeBuilder := runtime.NewSchemeBuilder(
		machinev1alpha1.AddToScheme,
	)
	utilruntime.Must(seedSchemeBuilder.AddToScheme(extensionSeedScheme))
	// Bug fix: the extended scheme was built and then discarded (the config
	// previously passed SeedScheme: nil), so the seed client could not decode
	// the machine objects the tests depend on. Pass the scheme through.
	return framework.NewShootFramework(&framework.ShootConfig{
		SeedScheme: extensionSeedScheme,
	})
}
95 changes: 76 additions & 19 deletions test/integration/infrastructure/infrastructure_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,23 +27,39 @@ import (
"strings"
"time"

apisvsphere "github.com/gardener/gardener-extension-provider-vsphere/pkg/apis/vsphere"
vsphereinstall "github.com/gardener/gardener-extension-provider-vsphere/pkg/apis/vsphere/install"
vspherev1alpha1 "github.com/gardener/gardener-extension-provider-vsphere/pkg/apis/vsphere/v1alpha1"
controllerinfra "github.com/gardener/gardener-extension-provider-vsphere/pkg/controller/infrastructure"
"github.com/gardener/gardener-extension-provider-vsphere/pkg/vsphere"
"github.com/gardener/gardener-extension-provider-vsphere/pkg/vsphere/infrastructure"
"github.com/gardener/gardener-extension-provider-vsphere/pkg/vsphere/infrastructure/ensurer"
"github.com/gardener/gardener-extension-provider-vsphere/pkg/vsphere/infrastructure/task"
gardenerv1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants"
extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1"
"github.com/gardener/gardener/pkg/extensions"
"github.com/gardener/gardener/pkg/logger"
gardenerutils "github.com/gardener/gardener/pkg/utils"
"github.com/gardener/gardener/test/framework"
gframework "github.com/gardener/gardener/test/framework"
"github.com/go-logr/logr"
"github.com/google/uuid"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
appsv1 "k8s.io/api/apps/v1"
clientset "k8s.io/client-go/kubernetes"
kframework "k8s.io/kubernetes/test/e2e/framework"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset"
admissionapi "k8s.io/pod-security-admission/api"

vapi_errors "github.com/vmware/vsphere-automation-sdk-go/lib/vapi/std/errors"
vapiclient "github.com/vmware/vsphere-automation-sdk-go/runtime/protocol/client"
"github.com/vmware/vsphere-automation-sdk-go/services/nsxt/infra"
"github.com/vmware/vsphere-automation-sdk-go/services/nsxt/infra/ip_pools"
t1nat "github.com/vmware/vsphere-automation-sdk-go/services/nsxt/infra/tier_1s/nat"
"github.com/vmware/vsphere-automation-sdk-go/services/nsxt/model"

"github.com/google/uuid"
corev1 "k8s.io/api/core/v1"
schedulingv1 "k8s.io/api/scheduling/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
Expand All @@ -55,15 +71,6 @@ import (
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"sigs.k8s.io/controller-runtime/pkg/manager"

apisvsphere "github.com/gardener/gardener-extension-provider-vsphere/pkg/apis/vsphere"
vsphereinstall "github.com/gardener/gardener-extension-provider-vsphere/pkg/apis/vsphere/install"
vspherev1alpha1 "github.com/gardener/gardener-extension-provider-vsphere/pkg/apis/vsphere/v1alpha1"
controllerinfra "github.com/gardener/gardener-extension-provider-vsphere/pkg/controller/infrastructure"
"github.com/gardener/gardener-extension-provider-vsphere/pkg/vsphere"
"github.com/gardener/gardener-extension-provider-vsphere/pkg/vsphere/infrastructure"
"github.com/gardener/gardener-extension-provider-vsphere/pkg/vsphere/infrastructure/ensurer"
"github.com/gardener/gardener-extension-provider-vsphere/pkg/vsphere/infrastructure/task"
)

var (
Expand Down Expand Up @@ -230,7 +237,7 @@ var _ = AfterSuite(func() {
}()

By("running cleanup actions")
framework.RunCleanupActions()
gframework.RunCleanupActions()

By("stopping test environment")
Expect(testEnv.Stop()).To(Succeed())
Expand All @@ -254,7 +261,7 @@ var _ = Describe("Infrastructure tests", func() {
})

AfterEach(func() {
framework.RunCleanupActions()
gframework.RunCleanupActions()
})

Context("with infrastructure creating own T1 gateway", func() {
Expand All @@ -268,17 +275,67 @@ var _ = Describe("Infrastructure tests", func() {
namespace := nsxtInfraSpec.ClusterName
t1Ref, lbSvcRef, err := prepareNewT1GatewayAndLBService(log, namespace, *nsxtInfraSpec, vsphereClient)
// ensure deleting resources even on errors
var cleanupHandle framework.CleanupActionHandle
cleanupHandle = framework.AddCleanupAction(func() {
var cleanupHandle gframework.CleanupActionHandle
cleanupHandle = gframework.AddCleanupAction(func() {
err := teardownT1GatewayAndLBService(log, t1Ref, lbSvcRef, vsphereClient)
Expect(err).NotTo(HaveOccurred())

framework.RemoveCleanupAction(cleanupHandle)
gframework.RemoveCleanupAction(cleanupHandle)
})
Expect(err).NotTo(HaveOccurred())
runTest(t1Ref.Path, lbSvcRef.Path)
})
})

// Exercises a StatefulSet with persistent volumes on the shoot: stable
// identity (hostname), usable mounted data directory, and data survival
// across a restart. Adapted from the upstream Kubernetes e2e "basic identity"
// StatefulSet test.
//
// NOTE(review): `ss` is declared below but never assigned before use — the
// dereference `*(ss.Spec.Replicas) = 3` in the It block will nil-panic.
// The upstream k8s e2e test constructs it in BeforeEach via
// e2estatefulset.NewStatefulSet(...) and also creates the headless Service
// ("test") that CheckServiceName asserts against — TODO confirm and add the
// missing construction before merging.
Context("with iterative volume mapping checks", func() {
f := kframework.NewDefaultFramework("statefulset")
// Pods exec `dd` into a mounted volume, which needs privileged PSA level.
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
ssName := "ss"
var ss *appsv1.StatefulSet // NOTE(review): never initialized — see note above
var ns string
var clientSet clientset.Interface
BeforeEach(func() {
clientSet = f.ClientSet
ns = f.Namespace.Name
})

It("should provide basic identity", func() {
By("Creating statefulset " + ssName + " in namespace " + ns)
// Requires a default StorageClass for the volume claims; skip otherwise.
e2epv.SkipIfNoDefaultStorageClass(clientSet)
*(ss.Spec.Replicas) = 3
// Pause pod creation so Saturate can bring pods up one at a time.
e2estatefulset.PauseNewPods(ss)

_, err := clientSet.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
kframework.ExpectNoError(err)

By("Saturating stateful set " + ss.Name)
e2estatefulset.Saturate(clientSet, ss)

By("Verifying statefulset mounted data directory is usable")
kframework.ExpectNoError(e2estatefulset.CheckMount(clientSet, ss, "/data"))

By("Verifying statefulset provides a stable hostname for each pod")
kframework.ExpectNoError(e2estatefulset.CheckHostname(clientSet, ss))

By("Verifying statefulset set proper service name")
// "test" must match the governing (headless) service name of the set.
kframework.ExpectNoError(e2estatefulset.CheckServiceName(ss, "test"))

// Write each pod's hostname into its persistent volume ...
cmd := "echo $(hostname) | dd of=/data/hostname conv=fsync"
By("Running " + cmd + " in all stateful pods")
kframework.ExpectNoError(e2estatefulset.ExecInStatefulPods(clientSet, ss, cmd))

By("Restarting statefulset " + ss.Name)
e2estatefulset.Restart(clientSet, ss)
e2estatefulset.WaitForRunningAndReady(clientSet, *ss.Spec.Replicas, ss)

By("Verifying statefulset mounted data directory is usable")
kframework.ExpectNoError(e2estatefulset.CheckMount(clientSet, ss, "/data"))

// ... then verify after restart each pod still sees its own hostname,
// proving the volume followed the pod identity.
cmd = "if [ \"$(cat /data/hostname)\" = \"$(hostname)\" ]; then exit 0; else exit 1; fi"
By("Running " + cmd + " in all stateful pods")
kframework.ExpectNoError(e2estatefulset.ExecInStatefulPods(clientSet, ss, cmd))
})
})
})

func runTest(t1RefPath string, lbSvcRefPath string) {
Expand All @@ -293,8 +350,8 @@ func runTest(t1RefPath string, lbSvcRefPath string) {
providerStatus *vspherev1alpha1.InfrastructureStatus
)

var cleanupHandle framework.CleanupActionHandle
cleanupHandle = framework.AddCleanupAction(func() {
var cleanupHandle gframework.CleanupActionHandle
cleanupHandle = gframework.AddCleanupAction(func() {
By("delete infrastructure")
Expect(client.IgnoreNotFound(c.Delete(ctx, infra))).To(Succeed())

Expand All @@ -316,7 +373,7 @@ func runTest(t1RefPath string, lbSvcRefPath string) {
Expect(client.IgnoreNotFound(c.Delete(ctx, namespace))).To(Succeed())
Expect(client.IgnoreNotFound(c.Delete(ctx, cluster))).To(Succeed())

framework.RemoveCleanupAction(cleanupHandle)
gframework.RemoveCleanupAction(cleanupHandle)
})

By("create namespace for test execution")
Expand Down