Update base dependency of kube to 1.15.0 #439

Merged (2 commits) on Jun 24, 2019
318 changes: 127 additions & 191 deletions Gopkg.lock

Large diffs are not rendered by default.

16 changes: 8 additions & 8 deletions Gopkg.toml
@@ -16,23 +16,23 @@
version = "1.10.0"

[[override]]
version = "kubernetes-1.14.2"
version = "kubernetes-1.15.0"
name = "k8s.io/apimachinery"

[[constraint]]
name = "k8s.io/kubernetes"
version = "=v1.14.2"
version = "=v1.15.0"

[[override]]
version = "kubernetes-1.14.2"
version = "kubernetes-1.15.0"
name = "k8s.io/api"

[[override]]
version = "kubernetes-1.14.2"
version = "kubernetes-1.15.0"
name = "k8s.io/apiserver"

[[override]]
version = "kubernetes-1.14.2"
version = "kubernetes-1.15.0"
name = "k8s.io/cli-runtime"

[[override]]
@@ -41,19 +41,19 @@

[[override]]
name = "k8s.io/client-go"
version = "kubernetes-1.14.2"
version = "kubernetes-1.15.0"

[[override]]
name = "github.com/kubernetes-csi/external-snapshotter"
version = "v1.1.0"

[[override]]
name = "k8s.io/apiextensions-apiserver"
version = "kubernetes-1.14.2"
version = "kubernetes-1.15.0"

[[override]]
name = "k8s.io/kube-aggregator"
version = "kubernetes-1.14.2"
version = "kubernetes-1.15.0"

[prune]
go-tests = true
3 changes: 2 additions & 1 deletion e2e/cephfs.go
@@ -7,6 +7,7 @@ import (
. "github.com/onsi/ginkgo" // nolint

"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

var (
@@ -45,7 +46,7 @@ var _ = Describe("cephfs", func() {
for _, file := range cephfsFiles {
res, err := framework.RunKubectl("delete", "-f", cephfsDirPath+file.Name())
if err != nil {
framework.Logf("failed to delete resource in %s with err %v", res, err)
e2elog.Logf("failed to delete resource in %s with err %v", res, err)
}
}
deleteResource(cephfsExamplePath + "secret.yaml")
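The e2e changes in this PR are mostly mechanical: the log calls move to the test/e2e/framework/log subpackage of the Kubernetes 1.15 test framework, so each file gains the aliased e2elog import and its framework.Logf calls become e2elog.Logf. A minimal sketch of the resulting call pattern; cleanupManifests is a hypothetical helper name, not code from this PR:

```go
package e2e

import (
	"k8s.io/kubernetes/test/e2e/framework"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

// cleanupManifests deletes a set of manifests and reports failures through
// the relocated logger; framework.RunKubectl itself is unchanged.
func cleanupManifests(dir string, names []string) {
	for _, name := range names {
		res, err := framework.RunKubectl("delete", "-f", dir+name)
		if err != nil {
			e2elog.Logf("failed to delete resource in %s with err %v", res, err)
		}
	}
}
```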
9 changes: 5 additions & 4 deletions e2e/deploy-rook.go
@@ -8,6 +8,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

var (
@@ -21,7 +22,7 @@ func formRookURL(version string) {
}

func getK8sClient() kubernetes.Interface {
framework.Logf("Creating a kubernetes client")
e2elog.Logf("Creating a kubernetes client")
client, err := framework.LoadClientset()
Expect(err).Should(BeNil())
return client
@@ -51,15 +52,15 @@ func deleteFileSystem() {
commonPath := fmt.Sprintf("%s/%s", rookURL, "filesystem-test.yaml")
_, err := framework.RunKubectl("delete", "-f", commonPath)
if err != nil {
framework.Logf("failed to delete file-system %v", err)
e2elog.Logf("failed to delete file-system %v", err)
}
}

func deleteRBDPool() {
commonPath := fmt.Sprintf("%s/%s", rookURL, "pool-test.yaml")
_, err := framework.RunKubectl("delete", "-f", commonPath)
if err != nil {
framework.Logf("failed to delete pool %v", err)
e2elog.Logf("failed to delete pool %v", err)
}
}

@@ -118,6 +119,6 @@ func tearDownRook() {
commonPath := fmt.Sprintf("%s/%s", rookURL, "common.yaml")
_, err := framework.RunKubectl("delete", "-f", commonPath)
if err != nil {
framework.Logf("failed to delete rook common %v", err)
e2elog.Logf("failed to delete rook common %v", err)
}
}
13 changes: 7 additions & 6 deletions e2e/rbd.go
@@ -7,6 +7,7 @@ import (
. "github.com/onsi/ginkgo" // nolint

"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

var (
@@ -48,7 +49,7 @@ var _ = Describe("RBD", func() {
for _, file := range rbdFiles {
res, err := framework.RunKubectl("delete", "-f", rbdDirPath+file.Name())
if err != nil {
framework.Logf("failed to delete resource in %s with err %v", res, err)
e2elog.Logf("failed to delete resource in %s with err %v", res, err)
}
}
deleteRBDPool()
@@ -96,15 +97,15 @@ var _ = Describe("RBD", func() {
}

pvc.Namespace = f.UniqueName
framework.Logf("The PVC template %+v", pvc)
e2elog.Logf("The PVC template %+v", pvc)
err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
if err != nil {
Fail(err.Error())
}
// validate created backend rbd images
images := listRBDImages(f)
if len(images) != 1 {
framework.Logf("backend image count %d expected image count %d", len(images), 1)
e2elog.Logf("backend image count %d expected image count %d", len(images), 1)
Fail("validate backend image failed")
}
snap := getSnapshot(snapshotPath)
@@ -121,7 +122,7 @@
Fail(err.Error())
}
if len(snapList) != 1 {
framework.Logf("backend snapshot not matching kube snap count,snap count = % kube snap count %d", len(snapList), 1)
e2elog.Logf("backend snapshot not matching kube snap count,snap count = % kube snap count %d", len(snapList), 1)
Fail("validate backend snapshot failed")
}

@@ -167,7 +168,7 @@ var _ = Describe("RBD", func() {
// validate created backend rbd images
images := listRBDImages(f)
if len(images) != totalCount {
framework.Logf("backend image creation not matching pvc count, image count = % pvc count %d", len(images), totalCount)
e2elog.Logf("backend image creation not matching pvc count, image count = % pvc count %d", len(images), totalCount)
Fail("validate multiple pvc failed")
}

@@ -184,7 +185,7 @@
// validate created backend rbd images
images = listRBDImages(f)
if len(images) > 0 {
framework.Logf("left out rbd backend images count %d", len(images))
e2elog.Logf("left out rbd backend images count %d", len(images))
Fail("validate multiple pvc failed")
}
})
47 changes: 24 additions & 23 deletions e2e/utils.go
@@ -24,6 +24,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/client/conditions"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
testutils "k8s.io/kubernetes/test/utils"
)

@@ -47,13 +48,13 @@ type snapInfo struct {
func waitForDaemonSets(name, ns string, c clientset.Interface, t int) error {
timeout := time.Duration(t) * time.Minute
start := time.Now()
framework.Logf("Waiting up to %v for all daemonsets in namespace '%s' to start",
e2elog.Logf("Waiting up to %v for all daemonsets in namespace '%s' to start",
timeout, ns)

return wait.PollImmediate(poll, timeout, func() (bool, error) {
ds, err := c.AppsV1().DaemonSets(ns).Get(name, metav1.GetOptions{})
if err != nil {
framework.Logf("Error getting daemonsets in namespace: '%s': %v", ns, err)
e2elog.Logf("Error getting daemonsets in namespace: '%s': %v", ns, err)
if strings.Contains(err.Error(), "not found") {
return false, nil
}
@@ -64,7 +65,7 @@ func waitForDaemonSets(name, ns string, c clientset.Interface, t int) error {
}
dNum := ds.Status.DesiredNumberScheduled
ready := ds.Status.NumberReady
framework.Logf("%d / %d pods ready in namespace '%s' in daemonset '%s' (%d seconds elapsed)", ready, dNum, ns, ds.ObjectMeta.Name, int(time.Since(start).Seconds()))
e2elog.Logf("%d / %d pods ready in namespace '%s' in daemonset '%s' (%d seconds elapsed)", ready, dNum, ns, ds.ObjectMeta.Name, int(time.Since(start).Seconds()))
if ready != dNum {
return false, nil
}
@@ -97,7 +98,7 @@ func waitForDeploymentComplete(name, ns string, c clientset.Interface, t int) er
}

reason = fmt.Sprintf("deployment status: %#v", deployment.Status)
framework.Logf(reason)
e2elog.Logf(reason)

return false, nil
})
@@ -206,7 +207,7 @@ func createRBDStorageClass(c kubernetes.Interface, f *framework.Framework) {
Expect(err).Should(BeNil())
}

func newSnapshotClient() (*snapClient.VolumesnapshotV1alpha1Client, error) {
func newSnapshotClient() (*snapClient.SnapshotV1alpha1Client, error) {
config, err := framework.LoadConfig()
if err != nil {
return nil, fmt.Errorf("error creating client: %v", err.Error())
@@ -325,13 +326,13 @@ func createPVCAndvalidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClai
Expect(err).Should(BeNil())
name := pvc.Name
start := time.Now()
framework.Logf("Waiting up to %v to be in Bound state", pvc)
e2elog.Logf("Waiting up to %v to be in Bound state", pvc)

return wait.PollImmediate(poll, timeout, func() (bool, error) {
framework.Logf("waiting for PVC %s (%d seconds elapsed)", pvc.Name, int(time.Since(start).Seconds()))
e2elog.Logf("waiting for PVC %s (%d seconds elapsed)", pvc.Name, int(time.Since(start).Seconds()))
pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(name, metav1.GetOptions{})
if err != nil {
framework.Logf("Error getting pvc in namespace: '%s': %v", pvc.Namespace, err)
e2elog.Logf("Error getting pvc in namespace: '%s': %v", pvc.Namespace, err)
if testutils.IsRetryableAPIError(err) {
return false, nil
}
@@ -363,7 +364,7 @@ func deletePVCAndValidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClai
nameSpace := pvc.Namespace
name := pvc.Name
var err error
framework.Logf("Deleting PersistentVolumeClaim %v on namespace %v", name, nameSpace)
e2elog.Logf("Deleting PersistentVolumeClaim %v on namespace %v", name, nameSpace)

pvc, err = c.CoreV1().PersistentVolumeClaims(nameSpace).Get(name, metav1.GetOptions{})
if err != nil {
@@ -381,7 +382,7 @@ func deletePVCAndValidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClai
start := time.Now()
return wait.PollImmediate(poll, timeout, func() (bool, error) {
// Check that the PVC is really deleted.
framework.Logf("waiting for PVC %s in state %s to be deleted (%d seconds elapsed)", name, pvc.Status.String(), int(time.Since(start).Seconds()))
e2elog.Logf("waiting for PVC %s in state %s to be deleted (%d seconds elapsed)", name, pvc.Status.String(), int(time.Since(start).Seconds()))
pvc, err = c.CoreV1().PersistentVolumeClaims(nameSpace).Get(name, metav1.GetOptions{})
if err == nil {
return false, nil
@@ -440,7 +441,7 @@ func getPodName(ns string, c kubernetes.Interface, opt *metav1.ListOptions) stri
func waitForPodInRunningState(name, ns string, c kubernetes.Interface, t int) error {
timeout := time.Duration(t) * time.Minute
start := time.Now()
framework.Logf("Waiting up to %v to be in Running state", name)
e2elog.Logf("Waiting up to %v to be in Running state", name)
return wait.PollImmediate(poll, timeout, func() (bool, error) {
pod, err := c.CoreV1().Pods(ns).Get(name, metav1.GetOptions{})
if err != nil {
@@ -452,7 +453,7 @@ func waitForPodInRunningState(name, ns string, c kubernetes.Interface, t int) er
case v1.PodFailed, v1.PodSucceeded:
return false, conditions.ErrPodCompleted
}
framework.Logf("%s app is in %s phase expected to be in Running state (%d seconds elapsed)", name, pod.Status.Phase, int(time.Since(start).Seconds()))
e2elog.Logf("%s app is in %s phase expected to be in Running state (%d seconds elapsed)", name, pod.Status.Phase, int(time.Since(start).Seconds()))
return false, nil
})
}
@@ -464,14 +465,14 @@ func deletePod(name, ns string, c kubernetes.Interface, t int) error {
return err
}
start := time.Now()
framework.Logf("Waiting for pod %v to be deleted", name)
e2elog.Logf("Waiting for pod %v to be deleted", name)
return wait.PollImmediate(poll, timeout, func() (bool, error) {
_, err := c.CoreV1().Pods(ns).Get(name, metav1.GetOptions{})

if apierrs.IsNotFound(err) {
return true, nil
}
framework.Logf("%s app to be deleted (%d seconds elapsed)", name, int(time.Since(start).Seconds()))
e2elog.Logf("%s app to be deleted (%d seconds elapsed)", name, int(time.Since(start).Seconds()))
if err != nil {
return false, err
}
@@ -503,7 +504,7 @@ func checkCephPods(ns string, c kubernetes.Interface, count, t int, opt *metav1.
return false, err
}

framework.Logf("pod count is %d expected count %d (%d seconds elapsed)", len(podList.Items), count, int(time.Since(start).Seconds()))
e2elog.Logf("pod count is %d expected count %d (%d seconds elapsed)", len(podList.Items), count, int(time.Since(start).Seconds()))

if len(podList.Items) >= count {
return true, nil
@@ -555,7 +556,7 @@ func validatePVCAndAppBinding(pvcPath, appPath string, f *framework.Framework) {
Fail(err.Error())
}
pvc.Namespace = f.UniqueName
framework.Logf("The PVC template %+v", pvc)
e2elog.Logf("The PVC template %+v", pvc)

app, err := loadApp(appPath)
if err != nil {
Expand All @@ -581,7 +582,7 @@ func validateNormalUserPVCAccess(pvcPath string, f *framework.Framework) {
}
pvc.Namespace = f.UniqueName
pvc.Name = f.UniqueName
framework.Logf("The PVC template %+v", pvc)
e2elog.Logf("The PVC template %+v", pvc)
err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
if err != nil {
Fail(err.Error())
@@ -660,18 +661,18 @@ func createSnapshot(snap *v1alpha1.VolumeSnapshot, t int) error {
if err != nil {
return err
}
framework.Logf("snapshot with name %v created in %v namespace", snap.Name, snap.Namespace)
e2elog.Logf("snapshot with name %v created in %v namespace", snap.Name, snap.Namespace)

timeout := time.Duration(t) * time.Minute
name := snap.Name
start := time.Now()
framework.Logf("Waiting up to %v to be in Ready state", snap)
e2elog.Logf("Waiting up to %v to be in Ready state", snap)

return wait.PollImmediate(poll, timeout, func() (bool, error) {
framework.Logf("waiting for snapshot %s (%d seconds elapsed)", snap.Name, int(time.Since(start).Seconds()))
e2elog.Logf("waiting for snapshot %s (%d seconds elapsed)", snap.Name, int(time.Since(start).Seconds()))
snaps, err := sclient.VolumeSnapshots(snap.Namespace).Get(name, metav1.GetOptions{})
if err != nil {
framework.Logf("Error getting snapshot in namespace: '%s': %v", snap.Namespace, err)
e2elog.Logf("Error getting snapshot in namespace: '%s': %v", snap.Namespace, err)
if testutils.IsRetryableAPIError(err) {
return false, nil
}
@@ -700,10 +701,10 @@ func deleteSnapshot(snap *v1alpha1.VolumeSnapshot, t int) error {
timeout := time.Duration(t) * time.Minute
name := snap.Name
start := time.Now()
framework.Logf("Waiting up to %v to be deleted", snap)
e2elog.Logf("Waiting up to %v to be deleted", snap)

return wait.PollImmediate(poll, timeout, func() (bool, error) {
framework.Logf("deleting snapshot %s (%d seconds elapsed)", name, int(time.Since(start).Seconds()))
e2elog.Logf("deleting snapshot %s (%d seconds elapsed)", name, int(time.Since(start).Seconds()))
_, err := sclient.VolumeSnapshots(snap.Namespace).Get(name, metav1.GetOptions{})
if err == nil {
return false, nil
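Beyond the log import swap, utils.go picks up the renamed typed client from external-snapshotter v1.1.0: VolumesnapshotV1alpha1Client becomes SnapshotV1alpha1Client. A rough sketch of the constructor as it reads after the change; the import path is an assumption based on the typed volumesnapshot/v1alpha1 client package in that release, not something shown in this hunk:

```go
package e2e

import (
	"fmt"

	snapClient "github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/typed/volumesnapshot/v1alpha1"
	"k8s.io/kubernetes/test/e2e/framework"
)

// newSnapshotClient builds the typed snapshot client from the test
// framework's kubeconfig; only the returned client type changes in this PR.
func newSnapshotClient() (*snapClient.SnapshotV1alpha1Client, error) {
	config, err := framework.LoadConfig()
	if err != nil {
		return nil, fmt.Errorf("error creating client: %v", err)
	}
	c, err := snapClient.NewForConfig(config)
	if err != nil {
		return nil, fmt.Errorf("error creating snapshot client: %v", err)
	}
	return c, nil
}
```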
4 changes: 2 additions & 2 deletions pkg/rbd/nodeserver.go
@@ -160,7 +160,7 @@ func (ns *NodeServer) mountVolume(req *csi.NodePublishVolumeRequest, devicePath

func (ns *NodeServer) createTargetPath(targetPath string, isBlock bool) (bool, error) {
// Check if that target path exists properly
notMnt, err := ns.mounter.IsNotMountPoint(targetPath)
notMnt, err := mount.IsNotMountPoint(ns.mounter, targetPath)
if err != nil {
if os.IsNotExist(err) {
if isBlock {
@@ -209,7 +209,7 @@ func (ns *NodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpu
}
}()

notMnt, err := ns.mounter.IsNotMountPoint(targetPath)
notMnt, err := mount.IsNotMountPoint(ns.mounter, targetPath)
if err != nil {
if os.IsNotExist(err) {
// targetPath has already been deleted
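The only change in nodeserver.go follows the 1.15 mount utilities, where IsNotMountPoint is called as a package-level helper that takes the mounter as an argument rather than as a method on the mounter. A small sketch of the new call shape; isStalePath is a hypothetical wrapper for illustration, not a function in this PR:

```go
package rbd

import (
	"os"

	"k8s.io/kubernetes/pkg/util/mount"
)

// isStalePath reports whether targetPath is no longer a mount point,
// treating a missing path as already cleaned up.
func isStalePath(mounter mount.Interface, targetPath string) (bool, error) {
	// Package-level helper in the 1.15 vendor tree; previously invoked as
	// ns.mounter.IsNotMountPoint(targetPath).
	notMnt, err := mount.IsNotMountPoint(mounter, targetPath)
	if err != nil {
		if os.IsNotExist(err) {
			// The target path has already been removed; nothing to unmount.
			return true, nil
		}
		return false, err
	}
	return notMnt, nil
}
```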
3 changes: 2 additions & 1 deletion pkg/rbd/rbd.go
@@ -23,6 +23,7 @@ import (
"github.com/container-storage-interface/spec/lib/go/csi"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/util/mount"
nsutil "k8s.io/kubernetes/pkg/volume/util/nsenter"
"k8s.io/utils/exec"
"k8s.io/utils/nsenter"
)
@@ -89,7 +90,7 @@ func NewNodeServer(d *csicommon.CSIDriver, containerized bool) (*NodeServer, err
if err != nil {
return nil, err
}
mounter = mount.NewNsenterMounter("", ne)
mounter = nsutil.NewMounter("", ne)
}
return &NodeServer{
DefaultNodeServer: csicommon.NewDefaultNodeServer(d),
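The nsenter-based mounter used for containerized deployments also moves: the constructor no longer comes from pkg/util/mount (NewNsenterMounter) but from k8s.io/kubernetes/pkg/volume/util/nsenter, hence the new nsutil import. A sketch of the selection logic under that assumption; newMounter is a hypothetical helper extracted from NewNodeServer for illustration:

```go
package rbd

import (
	"k8s.io/kubernetes/pkg/util/mount"
	nsutil "k8s.io/kubernetes/pkg/volume/util/nsenter"
	"k8s.io/utils/exec"
	"k8s.io/utils/nsenter"
)

// newMounter picks the mounter implementation for the node server.
func newMounter(containerized bool) (mount.Interface, error) {
	// Default: operate directly on the host mount namespace.
	mounter := mount.New("")
	if containerized {
		// When the plugin runs inside a container, wrap mount operations in
		// nsenter so they execute against the host's root filesystem.
		ne, err := nsenter.NewNsenter(nsenter.DefaultHostRootFsPath, exec.New())
		if err != nil {
			return nil, err
		}
		// Replacement for mount.NewNsenterMounter in the 1.15 vendor tree.
		mounter = nsutil.NewMounter("", ne)
	}
	return mounter, nil
}
```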