rebase: bump k8s.io/kubernetes from 1.26.2 to 1.27.2 #3848

Merged · 8 commits · Jun 6, 2023
Changes from 7 commits
2 changes: 1 addition & 1 deletion e2e/cephfs.go
@@ -227,7 +227,7 @@ var _ = Describe(cephfsType, func() {
logsCSIPods("app=csi-cephfsplugin", c)

// log all details from the namespace where Ceph-CSI is deployed
- e2edebug.DumpAllNamespaceInfo(c, cephCSINamespace)
+ e2edebug.DumpAllNamespaceInfo(context.TODO(), c, cephCSINamespace)
}
err := deleteConfigMap(cephFSDirPath)
if err != nil {
2 changes: 1 addition & 1 deletion e2e/cephfs_helper.go
@@ -102,7 +102,7 @@ func createCephfsStorageClass(

timeout := time.Duration(deployTimeout) * time.Minute

- return wait.PollImmediate(poll, timeout, func() (bool, error) {
+ return wait.PollUntilContextTimeout(context.TODO(), poll, timeout, true, func(_ context.Context) (bool, error) {
_, err = c.StorageV1().StorageClasses().Create(context.TODO(), &sc, metav1.CreateOptions{})
if err != nil {
framework.Logf("error creating StorageClass %q: %v", sc.Name, err)
15 changes: 7 additions & 8 deletions e2e/deployment.go
@@ -18,7 +18,6 @@ package e2e

import (
"context"
"errors"
"fmt"
"os"
"time"
@@ -94,7 +93,7 @@ func deleteDeploymentApp(clientSet kubernetes.Interface, name, ns string, deploy
start := time.Now()
framework.Logf("Waiting for deployment %q to be deleted", name)

- return wait.PollImmediate(poll, timeout, func() (bool, error) {
+ return wait.PollUntilContextTimeout(context.TODO(), poll, timeout, true, func(_ context.Context) (bool, error) {
_, err := clientSet.AppsV1().Deployments(ns).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
if isRetryableAPIError(err) {
@@ -118,7 +117,7 @@ func waitForDeploymentInAvailableState(clientSet kubernetes.Interface, name, ns
start := time.Now()
framework.Logf("Waiting up to %q to be in Available state", name)

- return wait.PollImmediate(poll, timeout, func() (bool, error) {
+ return wait.PollUntilContextTimeout(context.TODO(), poll, timeout, true, func(_ context.Context) (bool, error) {
d, err := clientSet.AppsV1().Deployments(ns).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
if isRetryableAPIError(err) {
@@ -145,7 +144,7 @@ func waitForDeploymentComplete(clientSet kubernetes.Interface, name, ns string,
err error
)
timeout := time.Duration(deployTimeout) * time.Minute
- err = wait.PollImmediate(poll, timeout, func() (bool, error) {
+ err = wait.PollUntilContextTimeout(context.TODO(), poll, timeout, true, func(_ context.Context) (bool, error) {
deployment, err = clientSet.AppsV1().Deployments(ns).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
if isRetryableAPIError(err) {
@@ -175,7 +174,7 @@ func waitForDeploymentComplete(clientSet kubernetes.Interface, name, ns string,
return false, nil
})

- if errors.Is(err, wait.ErrWaitTimeout) {
+ if wait.Interrupted(err) {
err = fmt.Errorf("%s", reason)
}
if err != nil {
@@ -311,7 +310,7 @@ func waitForDeploymentUpdateScale(
) error {
t := time.Duration(timeout) * time.Minute
start := time.Now()
- err := wait.PollImmediate(poll, t, func() (bool, error) {
+ err := wait.PollUntilContextTimeout(context.TODO(), poll, t, true, func(_ context.Context) (bool, error) {
scaleResult, upsErr := c.AppsV1().Deployments(ns).UpdateScale(context.TODO(),
deploymentName, scale, metav1.UpdateOptions{})
if upsErr != nil {
@@ -347,7 +346,7 @@ func waitForDeploymentUpdate(
) error {
t := time.Duration(timeout) * time.Minute
start := time.Now()
- err := wait.PollImmediate(poll, t, func() (bool, error) {
+ err := wait.PollUntilContextTimeout(context.TODO(), poll, t, true, func(_ context.Context) (bool, error) {
_, upErr := c.AppsV1().Deployments(deployment.Namespace).Update(
context.TODO(), deployment, metav1.UpdateOptions{})
if upErr != nil {
@@ -457,7 +456,7 @@ func waitForContainersArgsUpdate(
// wait for scale to become count
t := time.Duration(timeout) * time.Minute
start := time.Now()
- err = wait.PollImmediate(poll, t, func() (bool, error) {
+ err = wait.PollUntilContextTimeout(context.TODO(), poll, t, true, func(_ context.Context) (bool, error) {
deploy, getErr := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{})
if getErr != nil {
if isRetryableAPIError(getErr) {
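The error-handling change in waitForDeploymentComplete above follows from the polling change: PollUntilContextTimeout reports expiry as a context error rather than wait.ErrWaitTimeout, and wait.Interrupted(err) recognizes both. A minimal, self-contained sketch of that behavior — the interval, timeout, and output here are illustrative, not taken from this PR:

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	// A condition that never succeeds, polled every 500ms with a 2s timeout.
	err := wait.PollUntilContextTimeout(
		context.TODO(), 500*time.Millisecond, 2*time.Second, true,
		func(_ context.Context) (bool, error) {
			return false, nil // not done yet, keep polling
		})

	// wait.Interrupted reports whether polling stopped early: it matches the
	// deprecated wait.ErrWaitTimeout as well as context.Canceled and
	// context.DeadlineExceeded, replacing errors.Is(err, wait.ErrWaitTimeout).
	if wait.Interrupted(err) {
		fmt.Println("polling timed out:", err)
	}
}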
2 changes: 1 addition & 1 deletion e2e/log.go
@@ -46,7 +46,7 @@ func logsCSIPods(label string, c clientset.Interface) {
func kubectlLogPod(c clientset.Interface, pod *v1.Pod) {
container := pod.Spec.Containers
for i := range container {
- logs, err := frameworkPod.GetPodLogs(c, pod.Namespace, pod.Name, container[i].Name)
+ logs, err := frameworkPod.GetPodLogs(context.TODO(), c, pod.Namespace, pod.Name, container[i].Name)
if err != nil {
logs, err = getPreviousPodLogs(c, pod.Namespace, pod.Name, container[i].Name)
if err != nil {
4 changes: 2 additions & 2 deletions e2e/namespace.go
@@ -43,7 +43,7 @@ func createNamespace(c kubernetes.Interface, name string) error {
return fmt.Errorf("failed to create namespace: %w", err)
}

- return wait.PollImmediate(poll, timeout, func() (bool, error) {
+ return wait.PollUntilContextTimeout(context.TODO(), poll, timeout, true, func(_ context.Context) (bool, error) {
_, err := c.CoreV1().Namespaces().Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
framework.Logf("Error getting namespace: '%s': %v", name, err)
@@ -68,7 +68,7 @@ func deleteNamespace(c kubernetes.Interface, name string) error {
return fmt.Errorf("failed to delete namespace: %w", err)
}

- return wait.PollImmediate(poll, timeout, func() (bool, error) {
+ return wait.PollUntilContextTimeout(context.TODO(), poll, timeout, true, func(_ context.Context) (bool, error) {
_, err = c.CoreV1().Namespaces().Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
if apierrs.IsNotFound(err) {
4 changes: 2 additions & 2 deletions e2e/nfs.go
@@ -183,7 +183,7 @@ func createNFSStorageClass(

timeout := time.Duration(deployTimeout) * time.Minute

- return wait.PollImmediate(poll, timeout, func() (bool, error) {
+ return wait.PollUntilContextTimeout(context.TODO(), poll, timeout, true, func(_ context.Context) (bool, error) {
_, err = c.StorageV1().StorageClasses().Create(context.TODO(), &sc, metav1.CreateOptions{})
if err != nil {
framework.Logf("error creating StorageClass %q: %v", sc.Name, err)
@@ -294,7 +294,7 @@ var _ = Describe("nfs", func() {
logsCSIPods("app=csi-nfsplugin", c)

// log all details from the namespace where Ceph-CSI is deployed
- e2edebug.DumpAllNamespaceInfo(c, cephCSINamespace)
+ e2edebug.DumpAllNamespaceInfo(context.TODO(), c, cephCSINamespace)
}
err := deleteConfigMap(nfsDirPath)
if err != nil {
2 changes: 1 addition & 1 deletion e2e/node.go
@@ -60,7 +60,7 @@ func checkNodeHasLabel(c kubernetes.Interface, labelKey, labelValue string) erro
return fmt.Errorf("failed to list node: %w", err)
}
for i := range nodes.Items {
- e2enode.ExpectNodeHasLabel(c, nodes.Items[i].Name, labelKey, labelValue)
+ e2enode.ExpectNodeHasLabel(context.TODO(), c, nodes.Items[i].Name, labelKey, labelValue)
}

return nil
10 changes: 5 additions & 5 deletions e2e/pod.go
@@ -60,7 +60,7 @@ func waitForDaemonSets(name, ns string, c kubernetes.Interface, t int) error {
start := time.Now()
framework.Logf("Waiting up to %v for all daemonsets in namespace '%s' to start", timeout, ns)

- return wait.PollImmediate(poll, timeout, func() (bool, error) {
+ return wait.PollUntilContextTimeout(context.TODO(), poll, timeout, true, func(_ context.Context) (bool, error) {
ds, err := c.AppsV1().DaemonSets(ns).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
framework.Logf("Error getting daemonsets in namespace: '%s': %v", ns, err)
@@ -97,7 +97,7 @@ func findPodAndContainerName(f *framework.Framework, ns, cn string, opt *metav1.
podList *v1.PodList
listErr error
)
- err := wait.PollImmediate(poll, timeout, func() (bool, error) {
+ err := wait.PollUntilContextTimeout(context.TODO(), poll, timeout, true, func(_ context.Context) (bool, error) {
podList, listErr = e2epod.PodClientNS(f, ns).List(context.TODO(), *opt)
if listErr != nil {
if isRetryableAPIError(listErr) {
@@ -215,7 +215,7 @@ func listPods(f *framework.Framework, ns string, opt *metav1.ListOptions) ([]v1.
func execWithRetry(f *framework.Framework, opts *e2epod.ExecOptions) (string, string, error) {
timeout := time.Duration(deployTimeout) * time.Minute
var stdOut, stdErr string
- err := wait.PollImmediate(poll, timeout, func() (bool, error) {
+ err := wait.PollUntilContextTimeout(context.TODO(), poll, timeout, true, func(_ context.Context) (bool, error) {
var execErr error
stdOut, stdErr, execErr = e2epod.ExecWithOptions(f, *opts)
if execErr != nil {
@@ -353,7 +353,7 @@ func waitForPodInRunningState(name, ns string, c kubernetes.Interface, t int, ex
start := time.Now()
framework.Logf("Waiting up to %v to be in Running state", name)

- return wait.PollImmediate(poll, timeout, func() (bool, error) {
+ return wait.PollUntilContextTimeout(context.TODO(), poll, timeout, true, func(_ context.Context) (bool, error) {
pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
if isRetryableAPIError(err) {
@@ -402,7 +402,7 @@ func deletePod(name, ns string, c kubernetes.Interface, t int) error {
start := time.Now()
framework.Logf("Waiting for pod %v to be deleted", name)

- return wait.PollImmediate(poll, timeout, func() (bool, error) {
+ return wait.PollUntilContextTimeout(context.TODO(), poll, timeout, true, func(_ context.Context) (bool, error) {
_, err := c.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
if isRetryableAPIError(err) {
65 changes: 38 additions & 27 deletions e2e/pvc.go
@@ -58,7 +58,7 @@ func createPVCAndvalidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClai
start := time.Now()
framework.Logf("Waiting up to %v to be in Bound state", pvc)

- return wait.PollImmediate(poll, timeout, func() (bool, error) {
+ return wait.PollUntilContextTimeout(context.TODO(), poll, timeout, true, func(_ context.Context) (bool, error) {
framework.Logf("waiting for PVC %s (%d seconds elapsed)", name, int(time.Since(start).Seconds()))
pvc, err = c.CoreV1().PersistentVolumeClaims(namespace).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
@@ -89,6 +89,7 @@ func createPVCAndvalidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClai
return false, fmt.Errorf("failed to get pv: %w", err)
}
err = e2epv.WaitOnPVandPVC(
+ context.TODO(),
c,
&framework.TimeoutContext{ClaimBound: timeout, PVBound: timeout},
namespace,
@@ -129,7 +130,7 @@ func deletePVCAndPV(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, pv *v
start := time.Now()

pvcToDelete := pvc
- err = wait.PollImmediate(poll, timeout, func() (bool, error) {
+ err = wait.PollUntilContextTimeout(context.TODO(), poll, timeout, true, func(_ context.Context) (bool, error) {
// Check that the PVC is deleted.
framework.Logf(
"waiting for PVC %s in state %s to be deleted (%d seconds elapsed)",
@@ -167,7 +168,7 @@ func deletePVCAndPV(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, pv *v
start = time.Now()
pvToDelete := pv

- return wait.PollImmediate(poll, timeout, func() (bool, error) {
+ return wait.PollUntilContextTimeout(context.TODO(), poll, timeout, true, func(_ context.Context) (bool, error) {
// Check that the PV is deleted.
framework.Logf(
"waiting for PV %s in state %s to be deleted (%d seconds elapsed)",
@@ -196,19 +197,24 @@ func getPersistentVolumeClaim(c kubernetes.Interface, namespace, name string) (*
var pvc *v1.PersistentVolumeClaim
var err error
timeout := time.Duration(deployTimeout) * time.Minute
-	err = wait.PollImmediate(1*time.Second, timeout, func() (bool, error) {
-		pvc, err = c.CoreV1().PersistentVolumeClaims(namespace).Get(context.TODO(), name, metav1.GetOptions{})
-		if err != nil {
-			framework.Logf("Error getting pvc %q in namespace %q: %v", name, namespace, err)
-			if isRetryableAPIError(err) {
-				return false, nil
-			}
+	err = wait.PollUntilContextTimeout(
+		context.TODO(),
+		1*time.Second,
+		timeout,
+		true,
+		func(_ context.Context) (bool, error) {
+			pvc, err = c.CoreV1().PersistentVolumeClaims(namespace).Get(context.TODO(), name, metav1.GetOptions{})
Madhu-1 (Collaborator) commented on this line:

Instead of creating one more context with context.TODO, reuse the context that is passed as input to PollUntilContextTimeout. Can this be addressed in all the other places as well?

Member replied:

Sure, I can do that. Will put that in an additional commit.

Member replied:

@Madhu-1 care to check if the last commit addresses what you mean?
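For illustration, a minimal sketch of the suggested pattern — the helper name getPVC and the literal interval and timeout are assumptions, not the code from the follow-up commit. The condition function forwards the ctx that PollUntilContextTimeout supplies instead of creating another context.TODO():

package e2e

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// getPVC polls until the PVC is readable or the timeout expires. The ctx that
// PollUntilContextTimeout passes into the condition function is reused for the
// Get call, so cancelling the poller also cancels the in-flight API request.
func getPVC(c kubernetes.Interface, namespace, name string) (*v1.PersistentVolumeClaim, error) {
	var pvc *v1.PersistentVolumeClaim
	err := wait.PollUntilContextTimeout(
		context.TODO(), // parent context; a caller-provided ctx works as well
		1*time.Second,  // poll interval
		10*time.Minute, // overall timeout
		true,           // run the condition immediately, before the first wait
		func(ctx context.Context) (bool, error) {
			var getErr error
			// Reuse ctx instead of creating one more context.TODO().
			pvc, getErr = c.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, name, metav1.GetOptions{})
			if getErr != nil {
				// For brevity every error is retried here; the e2e helpers
				// above only retry when isRetryableAPIError(getErr) is true.
				return false, nil
			}

			return true, nil
		})

	return pvc, err
}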

+			if err != nil {
+				framework.Logf("Error getting pvc %q in namespace %q: %v", name, namespace, err)
+				if isRetryableAPIError(err) {
+					return false, nil
+				}

-			return false, fmt.Errorf("failed to get pvc: %w", err)
-		}
+				return false, fmt.Errorf("failed to get pvc: %w", err)
+			}

-		return true, err
-	})
+			return true, err
+		})

return pvc, err
}
@@ -219,19 +225,24 @@ func getPersistentVolume(c kubernetes.Interface, name string) (*v1.PersistentVol
var pv *v1.PersistentVolume
var err error
timeout := time.Duration(deployTimeout) * time.Minute
-	err = wait.PollImmediate(1*time.Second, timeout, func() (bool, error) {
-		pv, err = c.CoreV1().PersistentVolumes().Get(context.TODO(), name, metav1.GetOptions{})
-		if err != nil {
-			framework.Logf("Error getting pv %q: %v", name, err)
-			if isRetryableAPIError(err) {
-				return false, nil
-			}
+	err = wait.PollUntilContextTimeout(
+		context.TODO(),
+		1*time.Second,
+		timeout,
+		true,
+		func(_ context.Context) (bool, error) {
+			pv, err = c.CoreV1().PersistentVolumes().Get(context.TODO(), name, metav1.GetOptions{})
+			if err != nil {
+				framework.Logf("Error getting pv %q: %v", name, err)
+				if isRetryableAPIError(err) {
+					return false, nil
+				}

-			return false, fmt.Errorf("failed to get pv: %w", err)
-		}
+				return false, fmt.Errorf("failed to get pv: %w", err)
+			}

-		return true, err
-	})
+			return true, err
+		})

return pv, err
}
@@ -274,7 +285,7 @@ func deletePVCAndValidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClai
}
start := time.Now()

- return wait.PollImmediate(poll, timeout, func() (bool, error) {
+ return wait.PollUntilContextTimeout(context.TODO(), poll, timeout, true, func(_ context.Context) (bool, error) {
// Check that the PVC is really deleted.
framework.Logf(
"waiting for PVC %s in state %s to be deleted (%d seconds elapsed)",
@@ -383,7 +394,7 @@ func getMetricsForPVC(f *framework.Framework, pvc *v1.PersistentVolumeClaim, t i
// retry as kubelet does not immediately have the metrics available
timeout := time.Duration(t) * time.Minute

- return wait.PollImmediate(poll, timeout, func() (bool, error) {
+ return wait.PollUntilContextTimeout(context.TODO(), poll, timeout, true, func(_ context.Context) (bool, error) {
stdOut, stdErr, err := execCommandInToolBoxPod(f, cmd, rookNamespace)
if err != nil {
framework.Logf("failed to get metrics for pvc %q (%v): %v", pvc.Name, err, stdErr)
11 changes: 5 additions & 6 deletions e2e/rbd.go
@@ -18,7 +18,6 @@ package e2e

import (
"context"
"errors"
"fmt"
"strings"
"time"
@@ -213,7 +212,7 @@ func checkClusternameInMetadata(f *framework.Framework, ns, pool, image string)
stdErr string
execErr error
)
- err := wait.PollImmediate(poll, t, func() (bool, error) {
+ err := wait.PollUntilContextTimeout(context.TODO(), poll, t, true, func(_ context.Context) (bool, error) {
coName, stdErr, execErr = execCommandInToolBoxPod(f,
fmt.Sprintf("rbd image-meta get %s --image=%s %s", rbdOptions(pool), image, clusterNameKey),
ns)
@@ -356,7 +355,7 @@ var _ = Describe("RBD", func() {
logsCSIPods("app=csi-rbdplugin", c)

// log all details from the namespace where Ceph-CSI is deployed
- e2edebug.DumpAllNamespaceInfo(c, cephCSINamespace)
+ e2edebug.DumpAllNamespaceInfo(context.TODO(), c, cephCSINamespace)
}

err := deleteConfigMap(rbdDirPath)
@@ -597,7 +596,7 @@ var _ = Describe("RBD", func() {
validateRBDImageCount(f, 1, defaultRBDPool)
validateOmapCount(f, 1, rbdType, defaultRBDPool, volumesType)
// create namespace for reattach PVC, deletion will be taken care by framework
- ns, err := f.CreateNamespace(reattachPVCNamespace, nil)
+ ns, err := f.CreateNamespace(context.TODO(), reattachPVCNamespace, nil)
if err != nil {
framework.Failf("failed to create namespace: %v", err)
}
@@ -1888,7 +1887,7 @@ var _ = Describe("RBD", func() {

timeout := time.Duration(deployTimeout) * time.Minute
var reason string
- err = wait.PollImmediate(poll, timeout, func() (bool, error) {
+ err = wait.PollUntilContextTimeout(context.TODO(), poll, timeout, true, func(_ context.Context) (bool, error) {
var runningAttachCmd string
runningAttachCmd, stdErr, err = execCommandInContainer(
f,
@@ -1913,7 +1912,7 @@ var _ = Describe("RBD", func() {
return true, nil
})

- if errors.Is(err, wait.ErrWaitTimeout) {
+ if wait.Interrupted(err) {
framework.Failf("timed out waiting for the rbd-nbd process: %s", reason)
}
if err != nil {