
Commit

Bumping up kubernetes version to 1.31.2
nikhilbarge committed Nov 21, 2024
1 parent f97164f commit 4e2f8b0
Showing 18 changed files with 74 additions and 76 deletions.
7 changes: 3 additions & 4 deletions pkg/syncer/metadatasyncer.go
@@ -3473,10 +3473,9 @@ func initResizeReconciler(ctx context.Context, tkgClient clientset.Interface,
// https://github.com/kubernetes-sigs/vsphere-csi-driver/issues/585
informerFactory := informers.NewSharedInformerFactory(tkgClient, resizeResyncPeriod)

- rc, err := newResizeReconciler(tkgClient, supervisorClient, supervisorNamespace, resizeResyncPeriod, informerFactory,
- workqueue.NewTypedItemExponentialFailureRateLimiter[any](resizeRetryIntervalStart, resizeRetryIntervalMax),
- stopCh,
- )
+ rc, err := newResizeReconciler(tkgClient, supervisorClient, supervisorNamespace,
+ resizeResyncPeriod, informerFactory, workqueue.NewTypedItemExponentialFailureRateLimiter[any](
+ resizeRetryIntervalStart, resizeRetryIntervalMax), stopCh)
if err != nil {
return err
}
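Note: the reconciler above is wired with client-go's generics-based workqueue API (NewTypedItemExponentialFailureRateLimiter[any]), which matches the client-go release line for Kubernetes 1.31, where the untyped rate-limiter helpers are deprecated in favor of the typed ones. A minimal sketch of how the typed limiter behaves, assuming client-go v0.31.x; the item key "resize-pvc-1" is illustrative:

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	// Per-item delays grow exponentially from the base delay toward the cap,
	// mirroring resizeRetryIntervalStart/resizeRetryIntervalMax in the hunk above.
	limiter := workqueue.NewTypedItemExponentialFailureRateLimiter[string](
		500*time.Millisecond, 5*time.Minute)

	for i := 0; i < 4; i++ {
		fmt.Println(limiter.When("resize-pvc-1")) // 500ms, 1s, 2s, 4s
	}
	limiter.Forget("resize-pvc-1") // reset the failure count once a resize succeeds
}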
4 changes: 2 additions & 2 deletions tests/e2e/csi_snapshot_utils.go
@@ -563,8 +563,8 @@ func createPreProvisionedSnapshotInGuestCluster(ctx context.Context, volumeSnaps
return nil, nil, false, false, fmt.Errorf("failed to delete VolumeSnapshotContent: %v", err)
}

- framework.Logf(fmt.Sprintf("Creating static VolumeSnapshotContent in Guest Cluster using "+
- "supervisor VolumeSnapshotName %s", svcVolumeSnapshotName))
+ framework.Logf("Creating static VolumeSnapshotContent in Guest Cluster using "+
+ "supervisor VolumeSnapshotName %s", svcVolumeSnapshotName)
staticSnapshotContent, err := snapc.SnapshotV1().VolumeSnapshotContents().Create(ctx,
getVolumeSnapshotContentSpec(snapV1.DeletionPolicy("Delete"), svcVolumeSnapshotName,
"static-vs", namespace), metav1.CreateOptions{})
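Most of the test-file changes in this commit follow the pattern of this hunk: framework.Logf is printf-style, so pre-formatting the message with fmt.Sprintf (or building it by string concatenation) bypasses go vet's printf checks and makes Logf re-interpret any '%' already in the data. A small before/after sketch, assuming the upstream e2e framework's Logf:

package main

import (
	"fmt"

	"k8s.io/kubernetes/test/e2e/framework"
)

func main() {
	name := "pvc-42%full" // a stray '%' in the data

	// Before: the pre-formatted string becomes Logf's format string, so the
	// "%f" inside the data is logged as "%!f(MISSING)".
	framework.Logf(fmt.Sprintf("PVC name in SV %s", name))

	// After: exactly one formatting pass, and vet can check the verb/argument pair.
	framework.Logf("PVC name in SV %s", name)
}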
2 changes: 1 addition & 1 deletion tests/e2e/csi_static_provisioning_basic.go
@@ -1001,7 +1001,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() {
ginkgo.By("Creating pod")
pod, err := createPod(ctx, client, namespace, nil, []*v1.PersistentVolumeClaim{pvc}, false, "")
gomega.Expect(err).NotTo(gomega.HaveOccurred())
- podName := pod.GetName
+ podName := pod.GetName()
framework.Logf("podName: %s", podName)

ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s",
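The pod.GetName fix above (repeated below in snapshot_stretched_supervisor.go and topology_snapshot.go) corrects a Go method-value bug: without parentheses the expression is a func() string, not the pod's name, so the log line printed a function value. A self-contained illustration:

package main

import "fmt"

type pod struct{ name string }

func (p pod) GetName() string { return p.name }

func main() {
	p := pod{name: "static-pod-1"}

	nameFunc := p.GetName // method value: a func() string, not the name
	name := p.GetName()   // the actual string

	fmt.Printf("%T\n", nameFunc) // func() string
	fmt.Printf("%s\n", name)     // static-pod-1
	fmt.Printf("%s\n", nameFunc) // %!s(func() string=0x...), the old bug
}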
2 changes: 1 addition & 1 deletion tests/e2e/fullsync_test_for_block_volume.go
@@ -935,7 +935,7 @@ func verifyCnsVolumeMetadata4GCVol(volumeID string, svcPVCName string, gcPvc *v1
cnsQueryResult, err := e2eVSphere.queryCNSVolumeWithResult(volumeID)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
if len(cnsQueryResult.Volumes) == 0 {
framework.Logf("CNS volume query yielded no results for volume id: " + volumeID)
framework.Logf("CNS volume query yielded no results for volume id: %s", volumeID)
return false
}
cnsVolume := cnsQueryResult.Volumes[0]
66 changes: 33 additions & 33 deletions tests/e2e/gc_metadata_syncer.go
@@ -164,15 +164,15 @@ var _ = ginkgo.Describe("[csi-guest] pvCSI metadata syncer tests", func() {
gomega.Expect(isDiskAttached).To(gomega.BeTrue(), fmt.Sprintf("Volume is not attached to the node, %s", vmUUID))

podUID := string(pod.UID)
framework.Logf("Pod uuid : " + podUID)
framework.Logf("PVC name in SV " + svcPVCName)
framework.Logf("Pod uuid : %s", podUID)
framework.Logf("PVC name in SV %s", svcPVCName)
pvcUID = string(pvc.GetUID())
framework.Logf("PVC UUID in GC " + pvcUID)
framework.Logf("PVC UUID in GC %s", pvcUID)
gcClusterID = strings.Replace(svcPVCName, pvcUID, "", -1)

framework.Logf("gcClusterId " + gcClusterID)
framework.Logf("gcClusterId %s", gcClusterID)
pvUID := string(pv.UID)
framework.Logf("PV uuid " + pvUID)
framework.Logf("PV uuid %s", pvUID)

verifyEntityReferenceInCRDInSupervisor(ctx, f, pv.Spec.CSI.VolumeHandle, crdCNSVolumeMetadatas,
crdVersion, crdGroup, true, pv.Spec.CSI.VolumeHandle, false, nil, false)
@@ -241,14 +241,14 @@ var _ = ginkgo.Describe("[csi-guest] pvCSI metadata syncer tests", func() {
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}()

framework.Logf("PVC name in SV " + svcPVCName)
framework.Logf("PVC name in SV %s", svcPVCName)
pvcUID = string(pvc.GetUID())
framework.Logf("PVC UUID in GC " + pvcUID)
framework.Logf("PVC UUID in GC %s", pvcUID)
gcClusterID = strings.Replace(svcPVCName, pvcUID, "", -1)

framework.Logf("gcClusterId " + gcClusterID)
framework.Logf("gcClusterId %s", gcClusterID)
pvUID := string(pv.UID)
framework.Logf("PV uuid " + pvUID)
framework.Logf("PV uuid %s", pvUID)

verifyEntityReferenceInCRDInSupervisor(ctx, f, pv.Spec.CSI.VolumeHandle,
crdCNSVolumeMetadatas, crdVersion, crdGroup, true, pv.Spec.CSI.VolumeHandle, false, nil, false)
@@ -337,15 +337,15 @@ var _ = ginkgo.Describe("[csi-guest] pvCSI metadata syncer tests", func() {
gomega.Expect(isDiskAttached).To(gomega.BeTrue(), fmt.Sprintf("Volume is not attached to the node, %s", vmUUID))

podUID := string(pod.UID)
framework.Logf("Pod uuid : " + podUID)
framework.Logf("PVC name in SV " + svcPVCName)
framework.Logf("Pod uuid : %s", podUID)
framework.Logf("PVC name in SV %s", svcPVCName)
pvcUID = string(pvc.GetUID())
framework.Logf("PVC UUID in GC " + pvcUID)
framework.Logf("PVC UUID in GC %s", pvcUID)
gcClusterID = strings.Replace(svcPVCName, pvcUID, "", -1)

framework.Logf("gcClusterId " + gcClusterID)
framework.Logf("gcClusterId %s", gcClusterID)
pvUID := string(pv.UID)
framework.Logf("PV uuid " + pvUID)
framework.Logf("PV uuid %s", pvUID)

verifyEntityReferenceInCRDInSupervisor(ctx, f, pv.Spec.CSI.VolumeHandle,
crdCNSVolumeMetadatas, crdVersion, crdGroup, true, pv.Spec.CSI.VolumeHandle, false, nil, false)
@@ -444,7 +444,7 @@ var _ = ginkgo.Describe("[csi-guest] pvCSI metadata syncer tests", func() {
gomega.Expect(err).NotTo(gomega.HaveOccurred())
volumeID = getVolumeIDFromSupervisorCluster(pv.Spec.CSI.VolumeHandle)
gomega.Expect(volumeID).NotTo(gomega.BeEmpty())
framework.Logf("value of volumeID " + volumeID)
framework.Logf("value of volumeID %s", volumeID)
ginkgo.By(fmt.Sprintf("Waiting for labels %+v to be updated for pvc %s in namespace %s",
pvclabels, volumespec.PersistentVolumeClaim.ClaimName,
GetAndExpectStringEnvVar(envSupervisorClusterNamespace)))
@@ -477,7 +477,7 @@ var _ = ginkgo.Describe("[csi-guest] pvCSI metadata syncer tests", func() {
gomega.Expect(err).NotTo(gomega.HaveOccurred())
volumeID = getVolumeIDFromSupervisorCluster(pv.Spec.CSI.VolumeHandle)
gomega.Expect(volumeID).NotTo(gomega.BeEmpty())
framework.Logf("value of volumeID " + volumeID)
framework.Logf("value of volumeID %s", volumeID)
ginkgo.By(fmt.Sprintf("Waiting for labels %+v to be updated for pv %s", pvlabels, pv.Name))
err = e2eVSphere.waitForLabelsToBeUpdated(volumeID, pvlabels,
string(cnstypes.CnsKubernetesEntityTypePV), pv.Name, "")
@@ -554,15 +554,15 @@ var _ = ginkgo.Describe("[csi-guest] pvCSI metadata syncer tests", func() {
gomega.Expect(isDiskAttached).To(gomega.BeTrue(), fmt.Sprintf("Volume is not attached to the node, %s", vmUUID))

podUID := string(pod.UID)
framework.Logf("Pod uuid : " + podUID)
framework.Logf("PVC name in SV " + svcPVCName)
framework.Logf("Pod uuid : %s", podUID)
framework.Logf("PVC name in SV %s", svcPVCName)
pvcUID = string(pvc.GetUID())
framework.Logf("PVC UUID in GC " + pvcUID)
framework.Logf("PVC UUID in GC %s", pvcUID)
gcClusterID = strings.Replace(svcPVCName, pvcUID, "", -1)

framework.Logf("gcClusterId " + gcClusterID)
framework.Logf("gcClusterId %s", gcClusterID)
pvUID := string(pv.UID)
framework.Logf("PV uuid " + pvUID)
framework.Logf("PV uuid %s", pvUID)

verifyEntityReferenceInCRDInSupervisor(ctx, f, pv.Spec.CSI.VolumeHandle,
crdCNSVolumeMetadatas, crdVersion, crdGroup, true, pv.Spec.CSI.VolumeHandle, false, nil, false)
@@ -1173,14 +1173,14 @@ var _ = ginkgo.Describe("[csi-guest] pvCSI metadata syncer tests", func() {
volumeID = getVolumeIDFromSupervisorCluster(svcPVCName)
gomega.Expect(volumeID).NotTo(gomega.BeEmpty())

framework.Logf("PVC name in SV " + svcPVCName)
framework.Logf("PVC name in SV %s", svcPVCName)
pvcUID := string(pvclaim.GetUID())
framework.Logf("PVC UUID in GC " + pvcUID)
framework.Logf("PVC UUID in GC %s", pvcUID)
gcClusterID := strings.Replace(svcPVCName, pvcUID, "", -1)

framework.Logf("gcClusterId " + gcClusterID)
framework.Logf("gcClusterId %s", gcClusterID)
pvUID := string(pv.UID)
framework.Logf("PV uuid " + pvUID)
framework.Logf("PV uuid %s", pvUID)

verifyEntityReferenceInCRDInSupervisor(ctx, f, pv.Spec.CSI.VolumeHandle,
crdCNSVolumeMetadatas, crdVersion, crdGroup, true, pv.Spec.CSI.VolumeHandle, false, nil, false)
@@ -1274,9 +1274,9 @@ var _ = ginkgo.Describe("[csi-guest] pvCSI metadata syncer tests", func() {
volumeIDNewGC = getVolumeIDFromSupervisorCluster(svcNewPVCName)
gomega.Expect(volumeIDNewGC).NotTo(gomega.BeEmpty())

framework.Logf("PVC name in SV " + svcNewPVCName)
framework.Logf("PVC name in SV %s", svcNewPVCName)
pvcNewUID := string(pvcNew.GetUID())
framework.Logf("pvcNewUID in GC " + pvcNewUID)
framework.Logf("pvcNewUID in GC %s", pvcNewUID)
gcNewClusterID := strings.Replace(svcNewPVCName, pvcNewUID, "", -1)

ginkgo.By("Creating PV in new guest cluster with volume handle from SVC")
@@ -1289,7 +1289,7 @@ var _ = ginkgo.Describe("[csi-guest] pvCSI metadata syncer tests", func() {
gomega.Expect(err).NotTo(gomega.HaveOccurred())

pvNewUID := string(pvNew.UID)
framework.Logf("pvNew uuid " + pvNewUID)
framework.Logf("pvNew uuid %s", pvNewUID)

ginkgo.By("verify crd in supervisor")
time.Sleep(10 * time.Second)
@@ -1400,14 +1400,14 @@ var _ = ginkgo.Describe("[csi-guest] pvCSI metadata syncer tests", func() {
volumeID = getVolumeIDFromSupervisorCluster(svcPVCName)
gomega.Expect(volumeID).NotTo(gomega.BeEmpty())

framework.Logf("PVC name in SV " + svcPVCName)
framework.Logf("PVC name in SV %s", svcPVCName)
pvcUID := string(pvclaim.GetUID())
framework.Logf("PVC UUID in GC " + pvcUID)
framework.Logf("PVC UUID in GC %s", pvcUID)
gcClusterID := strings.Replace(svcPVCName, pvcUID, "", -1)

framework.Logf("gcClusterId " + gcClusterID)
framework.Logf("gcClusterId %s", gcClusterID)
pvUID := string(pv.UID)
framework.Logf("PV uuid " + pvUID)
framework.Logf("PV uuid %s", pvUID)

verifyEntityReferenceInCRDInSupervisor(ctx, f, pv.Spec.CSI.VolumeHandle,
crdCNSVolumeMetadatas, crdVersion, crdGroup, true, pv.Spec.CSI.VolumeHandle, false, nil, false)
@@ -1468,7 +1468,7 @@ var _ = ginkgo.Describe("[csi-guest] pvCSI metadata syncer tests", func() {
gomega.Expect(err).NotTo(gomega.HaveOccurred())

pvNewUID := string(pvNew.UID)
framework.Logf("pvNew uuid " + pvNewUID)
framework.Logf("pvNew uuid %s", pvNewUID)

defer func() {
if pvc != nil {
2 changes: 1 addition & 1 deletion tests/e2e/multi_vc.go
@@ -1920,7 +1920,7 @@ var _ = ginkgo.Describe("[multivc-positive] MultiVc-Topology-Positive", func() {
}
vimClient, err := convertToVimClient(ctx, soapClient)
if err != nil {
framework.Logf("Error: ", err)
framework.Logf("Error: %v", err)
}
finder := find.NewFinder(vimClient, false)

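This hunk and several below (multi_vc_utils.go, policy_driven_vol_allocation.go, storage_policy_utils.go, util.go) fix a related logging mistake: passing extra arguments to framework.Logf without format verbs, Println-style. A short sketch of what the broken call actually produces, again assuming the upstream e2e framework's Logf:

package main

import (
	"errors"

	"k8s.io/kubernetes/test/e2e/framework"
)

func main() {
	err := errors.New("connection refused")

	// Before: Logf is printf-style, so with no verb to bind to, the argument is
	// appended as "%!(EXTRA *errors.errorString=connection refused)".
	framework.Logf("Error: ", err)

	// After: an explicit %v renders the error cleanly.
	framework.Logf("Error: %v", err)
}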
6 changes: 3 additions & 3 deletions tests/e2e/multi_vc_utils.go
@@ -1035,7 +1035,7 @@ func createVsphereConfigSecret(namespace string, cfg e2eTestConfig, sshClientCon
conf += fmt.Sprintf("[Labels]\ntopology-categories = \"%s\"\n", cfg.Labels.TopologyCategories)
conf += "\nEOF"

- framework.Logf(conf)
+ framework.Logf("conf: %s", conf)

result, err := sshExec(sshClientConfig, controlIp, conf)
if err != nil && result.Code != 0 {
@@ -1044,7 +1044,7 @@ }
}
applyConf := "kubectl create secret generic vsphere-config-secret --from-file=csi-vsphere.conf " +
"-n " + namespace
- framework.Logf(applyConf)
+ framework.Logf("applyConf: %s", applyConf)
result, err = sshExec(sshClientConfig, controlIp, applyConf)
if err != nil && result.Code != 0 {
fssh.LogResult(result)
@@ -1147,7 +1147,7 @@ func createStaticFCDPvAndPvc(ctx context.Context, f *framework.Framework,
fcdID, err := multiVCe2eVSphere.createFCDInMultiVC(ctx, "BasicStaticFCD"+curtimeinstring, diskSizeInMb,
defaultDatastore.Reference(), clientIndex)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.Logf("FCD ID :", fcdID)
framework.Logf("FCD ID : %s", fcdID)

ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow newly created FCD:%s to sync with pandora",
pandoraSyncWaitTime, fcdID))
2 changes: 1 addition & 1 deletion tests/e2e/policy_driven_vol_allocation.go
@@ -3723,7 +3723,7 @@ func verifyKnownDataInPod(f *framework.Framework, pod *v1.Pod, testdataFile stri

framework.Logf("Running diff with source file and file from pod %v for 100M starting %vM", pod.Name, skip)
op, err := exec.Command("diff", testdataFile, testdataFile+pod.Name).Output()
framework.Logf("diff: ", op)
framework.Logf("diff: %v", op)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(len(op)).To(gomega.BeZero())
}
4 changes: 2 additions & 2 deletions tests/e2e/snapshot_stretched_supervisor.go
@@ -723,7 +723,7 @@ var _ = ginkgo.Describe("Stretched-Supervisor-Snapshot", func() {
ginkgo.By("Verify filesystem size for mount point /mnt/volume1")
fsSize, err := getFSSizeMb(f, pod)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.Logf("File system size after expansion : %s, before expansion: %s", fsSize, diskSizeInMb)
framework.Logf("File system size after expansion : %d, before expansion: %d", fsSize, diskSizeInMb)
if fsSize < diskSizeInMb {
framework.Failf("error updating filesystem size for %q. Resulting filesystem size is %d", pvclaim.Name, fsSize)
}
@@ -886,7 +886,7 @@ var _ = ginkgo.Describe("Stretched-Supervisor-Snapshot", func() {
ginkgo.By("Creating pod")
pod, err := createPod(ctx, client, namespace, nil, []*v1.PersistentVolumeClaim{pvc}, false, execRWXCommandPod1)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
- podName := pod.GetName
+ podName := pod.GetName()
framework.Logf("podName : %s", podName)

ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s",
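The fsSize fixes here and in staging_env_basic.go and tkgs_ha.go below swap %s for %d: the filesystem-size helpers return an integer (assumed int64 in this sketch), and %s applied to an integer prints a formatting-error marker rather than the number:

package main

import "fmt"

func main() {
	var fsSize int64 = 2048 // filesystem size in MB, as the tests report it

	bad := fmt.Sprintf("File system size after expansion : %s", fsSize)
	good := fmt.Sprintf("File system size after expansion : %d", fsSize)

	fmt.Println(bad)  // File system size after expansion : %!s(int64=2048)
	fmt.Println(good) // File system size after expansion : 2048
}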
2 changes: 1 addition & 1 deletion tests/e2e/staging_env_basic.go
@@ -1190,7 +1190,7 @@ var _ = ginkgo.Describe("[csi-supervisor-staging] Tests for WCP env with minimal
ginkgo.By("Verify filesystem size for mount point /mnt/volume1")
fsSize, err = getFSSizeMbWithoutF(namespace, pod)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.Logf("File system size after expansion : %s", fsSize)
framework.Logf("File system size after expansion : %d", fsSize)
// Filesystem size may be smaller than the size of the block volume
// so here we are checking if the new filesystem size is greater than
// the original volume size as the filesystem is formatted for the
2 changes: 1 addition & 1 deletion tests/e2e/storage_policy_utils.go
@@ -229,7 +229,7 @@ func updateVmfsPolicyAlloctype(
if err != nil {
return err
}
framework.Logf("policy content after update", spew.Sdump(policyContent))
framework.Logf("policy content after update: %s", spew.Sdump(policyContent))
return nil
}

2 changes: 1 addition & 1 deletion tests/e2e/tkgs_ha.go
@@ -2062,7 +2062,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() {
ginkgo.By("Verify filesystem size for mount point /mnt/volume1")
fsSize, err = getFSSizeMb(f, podList[i])
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.Logf("File system size after expansion : %s", fsSize)
framework.Logf("File system size after expansion : %d", fsSize)
// Filesystem size may be smaller than the size of the block volume
// so here we are checking if the new filesystem size is greater than
// the original volume size as the filesystem is formatted for the
2 changes: 1 addition & 1 deletion tests/e2e/topology_snapshot.go
@@ -265,7 +265,7 @@ var _ = ginkgo.Describe("[topology-snapshot] Topology-Snapshot", func() {
ginkgo.By("Creating pod")
pod, err := createPod(ctx, client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim2}, false, "")
gomega.Expect(err).NotTo(gomega.HaveOccurred())
- podName := pod.GetName
+ podName := pod.GetName()
framework.Logf("podName: %s", podName)

/* Verify PV node affinity and that the PODS are running on appropriate node as
2 changes: 1 addition & 1 deletion tests/e2e/util.go
@@ -6550,7 +6550,7 @@ func verifyDataFromRawBlockVolume(ns string, podName string, devicePath string,

framework.Logf("Running diff with source file and file from pod %v for 1M starting %vM", podName, skip)
op, err := exec.Command("diff", testdataFile, testdataFile+podName).Output()
framework.Logf("diff: ", op)
framework.Logf("diff: %v", op)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(len(op)).To(gomega.BeZero())
}
11 changes: 5 additions & 6 deletions tests/e2e/vcp_to_csi_attach_detach.go
@@ -37,7 +37,6 @@ import (
fpod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
fpv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/pod-security-admission/api"
admissionapi "k8s.io/pod-security-admission/api"

"sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/migration/v1alpha1"
@@ -718,7 +717,7 @@ var _ = ginkgo.Describe("[csi-vcp-mig] VCP to CSI migration attach, detach tests

ginkgo.By("Creating pod")

- pod := fpod.MakePod(namespace, nil, []*v1.PersistentVolumeClaim{pvc1}, api.LevelBaseline, execCommand)
+ pod := fpod.MakePod(namespace, nil, []*v1.PersistentVolumeClaim{pvc1}, admissionapi.LevelBaseline, execCommand)
pod.Spec.Containers[0].Image = busyBoxImageOnGcr
pod, err = client.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
@@ -811,7 +810,7 @@ var _ = ginkgo.Describe("[csi-vcp-mig] VCP to CSI migration attach, detach tests
kubeletMigEnabled = true

ginkgo.By("Creating pod")
- pod := fpod.MakePod(namespace, nil, []*v1.PersistentVolumeClaim{pvc1}, api.LevelBaseline, execCommand)
+ pod := fpod.MakePod(namespace, nil, []*v1.PersistentVolumeClaim{pvc1}, admissionapi.LevelBaseline, execCommand)
pod.Spec.Containers[0].Image = busyBoxImageOnGcr
pod, err = client.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
@@ -917,7 +916,7 @@ var _ = ginkgo.Describe("[csi-vcp-mig] VCP to CSI migration attach, detach tests
gomega.Expect(err).NotTo(gomega.HaveOccurred())

ginkgo.By("Creating pod")
- pod := fpod.MakePod(namespace, nil, []*v1.PersistentVolumeClaim{pvc1}, api.LevelBaseline, execCommand)
+ pod := fpod.MakePod(namespace, nil, []*v1.PersistentVolumeClaim{pvc1}, admissionapi.LevelBaseline, execCommand)
pod.Spec.Containers[0].Image = busyBoxImageOnGcr
pod, err = client.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
@@ -1011,7 +1010,7 @@ var _ = ginkgo.Describe("[csi-vcp-mig] VCP to CSI migration attach, detach tests
kubeletMigEnabled = true

ginkgo.By("Creating pod")
- pod := fpod.MakePod(namespace, nil, []*v1.PersistentVolumeClaim{pvc1}, api.LevelBaseline, execCommand)
+ pod := fpod.MakePod(namespace, nil, []*v1.PersistentVolumeClaim{pvc1}, admissionapi.LevelBaseline, execCommand)
pod.Spec.Containers[0].Image = busyBoxImageOnGcr
pod, err = client.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
@@ -1062,7 +1061,7 @@ func createMultiplePods(ctx context.Context, client clientset.Interface,
var err error
for _, pvcs := range pvclaims2d {
if len(pvcs) != 0 {
- pod := fpod.MakePod(pvcs[0].Namespace, nil, pvcs, api.LevelBaseline, execCommand)
+ pod := fpod.MakePod(pvcs[0].Namespace, nil, pvcs, admissionapi.LevelBaseline, execCommand)
pod.Spec.Containers[0].Image = busyBoxImageOnGcr
pod, err := client.CoreV1().Pods(pvcs[0].Namespace).Create(ctx, pod, metav1.CreateOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
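The import hunk at the top of this file removes a duplicate: k8s.io/pod-security-admission/api had been imported both bare and under the admissionapi alias, and the remaining hunks move every api.LevelBaseline call site to the surviving alias. A sketch of the cleaned-up form; the variable name is illustrative:

package e2e

// One aliased import instead of two names for the same package.
import (
	admissionapi "k8s.io/pod-security-admission/api"
)

// Call sites reference the alias consistently when building test pods.
var podSecurityLevel = admissionapi.LevelBaseline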
2 changes: 1 addition & 1 deletion tests/e2e/volume_health_test.go
@@ -1868,7 +1868,7 @@ var _ = ginkgo.Describe("Volume health check", func() {
hostIP = psodHostWithPv(ctx, &e2eVSphere, pv.Name)

ginkgo.By("Query CNS volume health status")
- err = queryCNSVolumeWithWait(ctx, client, volHandle)
+ err = queryCNSVolumeWithWait(ctx, volHandle)
gomega.Expect(err).NotTo(gomega.HaveOccurred())

ginkgo.By("Bringing SV API server UP")
